/* NOTE(review): removed a stray line-number gutter (the bare digits 1–77 and a
   trailing "|") that a copy/extraction step prepended to this file. It was not
   part of the bundle and the lone "|" made the file syntactically invalid. */
// NOTE(review): machine-generated, minified webpack bundle (vendored
// @xenova/transformers build for Firefox ML). The code is left byte-identical;
// only explanatory comments are added — hand edits would be unsafe and would
// be lost on the next re-vendoring.
//
// Preamble: imports the ONNX runtime shim from chrome:// and opens the webpack
// module map `t`. Entries visible here:
//   "onnxruntime-web"  -> re-exports the chrome://...ort-dev.js import (`e`);
//   "?7a2c"/"?a42a"/"?2b25"/"?e65c"/"?569f"/"?3f59"/"?154a" -> webpack-ignored
//   Node built-ins (fs, path, sharp, stream/web, fs, path, url) stubbed out as
//   empty modules for this browser build.
import*as e from"chrome://global/content/ml/ort-dev.js";var t={"onnxruntime-web":
/*!********************************************************!*\
!*** external "chrome://global/content/ml/ort-dev.js" ***!
\********************************************************/t=>{t.exports=e},"?7a2c":
/*!********************!*\
!*** fs (ignored) ***!
\********************/()=>{},"?a42a":
/*!**********************!*\
!*** path (ignored) ***!
\**********************/()=>{},"?2b25":
/*!***********************!*\
!*** sharp (ignored) ***!
\***********************/()=>{},"?e65c":
/*!****************************!*\
!*** stream/web (ignored) ***!
\****************************/()=>{},"?569f":
/*!********************!*\
!*** fs (ignored) ***!
\********************/()=>{},"?3f59":
/*!**********************!*\
!*** path (ignored) ***!
\**********************/()=>{},"?154a":
/*!*********************!*\
!*** url (ignored) ***!
\*********************/()=>{},"./node_modules/@huggingface/jinja/dist/index.js":
// NOTE(review): bundled @huggingface/jinja — a minimal Jinja template engine
// used for chat templates. Exports (via s.d): Environment (X), Interpreter (Q),
// Template (Y), parse (z), tokenize (d). Internals, as minified here:
//   o = frozen token-type enum; n = keyword table; r = Token class;
//   d = tokenizer (handles {%..%}, {{..}}, lstrip/trim_blocks, "-" whitespace
//       control, string escapes via map c);
//   z = recursive-descent parser producing AST nodes (Program h, If p, For _,
//       Set m, Member g, Call M, literals k/b/x/y/F/C, Binary P, Filter v,
//       Test S, Unary A, Slice L, KeywordArgument E);
//   B = Python-style range(); I = Python-style slice with negative indices;
//   runtime values D/N/V/q/j/R/G/W/$/U (base/numeric/string/bool/object/array/
//   tuple/function/null/undefined) with per-type builtins and tests;
//   Q.evaluate* = tree-walking interpreter; H = JS value -> runtime value;
//   Y (Template) tokenizes+parses in the constructor and renders via Q.
// Minified upstream code: left byte-identical (the apparent line breaks below
// fall mid-token, e.g. after `var ` / `return `, so no comments are interleaved).
/*!*******************************************************!*\
!*** ./node_modules/@huggingface/jinja/dist/index.js ***!
\*******************************************************/(e,t,s)=>{s.r(t),s.d(t,{Environment:()=>X,Interpreter:()=>Q,Template:()=>Y,parse:()=>z,tokenize:()=>d});var o=Object.freeze({Text:"Text",NumericLiteral:"NumericLiteral",BooleanLiteral:"BooleanLiteral",StringLiteral:"StringLiteral",Identifier:"Identifier",Equals:"Equals",OpenParen:"OpenParen",CloseParen:"CloseParen",OpenStatement:"OpenStatement",CloseStatement:"CloseStatement",OpenExpression:"OpenExpression",CloseExpression:"CloseExpression",OpenSquareBracket:"OpenSquareBracket",CloseSquareBracket:"CloseSquareBracket",OpenCurlyBracket:"OpenCurlyBracket",CloseCurlyBracket:"CloseCurlyBracket",Comma:"Comma",Dot:"Dot",Colon:"Colon",Pipe:"Pipe",CallOperator:"CallOperator",AdditiveBinaryOperator:"AdditiveBinaryOperator",MultiplicativeBinaryOperator:"MultiplicativeBinaryOperator",ComparisonBinaryOperator:"ComparisonBinaryOperator",UnaryOperator:"UnaryOperator",Set:"Set",If:"If",For:"For",In:"In",Is:"Is",NotIn:"NotIn",Else:"Else",EndIf:"EndIf",ElseIf:"ElseIf",EndFor:"EndFor",And:"And",Or:"Or",Not:"UnaryOperator"}),n=Object.freeze({set:o.Set,for:o.For,in:o.In,is:o.Is,if:o.If,else:o.Else,endif:o.EndIf,elif:o.ElseIf,endfor:o.EndFor,and:o.And,or:o.Or,not:o.Not,"not in":o.NotIn,true:o.BooleanLiteral,false:o.BooleanLiteral}),r=class{constructor(e,t){this.value=e,this.type=t}};function a(e){return/\w/.test(e)}function i(e){return/[0-9]/.test(e)}var 
l=[["{%",o.OpenStatement],["%}",o.CloseStatement],["{{",o.OpenExpression],["}}",o.CloseExpression],["(",o.OpenParen],[")",o.CloseParen],["{",o.OpenCurlyBracket],["}",o.CloseCurlyBracket],["[",o.OpenSquareBracket],["]",o.CloseSquareBracket],[",",o.Comma],[".",o.Dot],[":",o.Colon],["|",o.Pipe],["<=",o.ComparisonBinaryOperator],[">=",o.ComparisonBinaryOperator],["==",o.ComparisonBinaryOperator],["!=",o.ComparisonBinaryOperator],["<",o.ComparisonBinaryOperator],[">",o.ComparisonBinaryOperator],["+",o.AdditiveBinaryOperator],["-",o.AdditiveBinaryOperator],["*",o.MultiplicativeBinaryOperator],["/",o.MultiplicativeBinaryOperator],["%",o.MultiplicativeBinaryOperator],["=",o.Equals]],c=new Map([["n","\n"],["t","\t"],["r","\r"],["b","\b"],["f","\f"],["v","\v"],["'","'"],['"','"'],["\\","\\"]]);function d(e,t={}){const s=[],d=function(e,t={}){return e.endsWith("\n")&&(e=e.slice(0,-1)),e=e.replace(/{#.*?#}/gs,"{##}"),t.lstrip_blocks&&(e=e.replace(/^[ \t]*({[#%])/gm,"$1")),t.trim_blocks&&(e=e.replace(/([#%]})\n/g,"$1")),e.replace(/{##}/g,"").replace(/-%}\s*/g,"%}").replace(/\s*{%-/g,"{%").replace(/-}}\s*/g,"}}").replace(/\s*{{-/g,"{{")}(e,t);let u=0;const h=e=>{let t="";for(;e(d[u]);)if("\\"!==d[u]){if(t+=d[u++],u>=d.length)throw new SyntaxError("Unexpected end of input")}else{if(++u,u>=d.length)throw new SyntaxError("Unexpected end of input");const e=d[u++],s=c.get(e);if(void 0===s)throw new SyntaxError(`Unexpected escaped character: ${e}`);t+=s}return t};e:for(;u<d.length;){const e=s.at(-1)?.type;if(void 0===e||e===o.CloseStatement||e===o.CloseExpression){let e="";for(;u<d.length&&("{"!==d[u]||"%"!==d[u+1]&&"{"!==d[u+1]);)e+=d[u++];if(e.length>0){s.push(new r(e,o.Text));continue}}h((e=>/\s/.test(e)));const t=d[u];if("-"===t||"+"===t){const e=s.at(-1)?.type;if(e===o.Text||void 0===e)throw new SyntaxError(`Unexpected character: ${t}`);switch(e){case o.Identifier:case o.NumericLiteral:case o.BooleanLiteral:case o.StringLiteral:case o.CloseParen:case 
o.CloseSquareBracket:break;default:{++u;const e=h(i);s.push(new r(`${t}${e}`,e.length>0?o.NumericLiteral:o.UnaryOperator));continue}}}for(const[e,t]of l){if(d.slice(u,u+e.length)===e){s.push(new r(e,t)),u+=e.length;continue e}}if("'"!==t&&'"'!==t)if(i(t)){const e=h(i);s.push(new r(e,o.NumericLiteral))}else{if(!a(t))throw new SyntaxError(`Unexpected character: ${t}`);{const e=h(a),t=Object.hasOwn(n,e)?n[e]:o.Identifier;t===o.In&&s.at(-1)?.type===o.Not?(s.pop(),s.push(new r("not in",o.NotIn))):s.push(new r(e,t))}}else{++u;const e=h((e=>e!==t));s.push(new r(e,o.StringLiteral)),++u}}return s}var u=class{type="Statement"},h=class extends u{constructor(e){super(),this.body=e}type="Program"},p=class extends u{constructor(e,t,s){super(),this.test=e,this.body=t,this.alternate=s}type="If"},_=class extends u{constructor(e,t,s){super(),this.loopvar=e,this.iterable=t,this.body=s}type="For"},m=class extends u{constructor(e,t){super(),this.assignee=e,this.value=t}type="Set"},f=class extends u{type="Expression"},g=class extends f{constructor(e,t,s){super(),this.object=e,this.property=t,this.computed=s}type="MemberExpression"},M=class extends f{constructor(e,t){super(),this.callee=e,this.args=t}type="CallExpression"},w=class extends f{constructor(e){super(),this.value=e}type="Identifier"},T=class extends f{constructor(e){super(),this.value=e}type="Literal"},k=class extends T{type="NumericLiteral"},b=class extends T{type="StringLiteral"},x=class extends T{type="BooleanLiteral"},y=class extends T{type="ArrayLiteral"},F=class extends T{type="TupleLiteral"},C=class extends T{type="ObjectLiteral"},P=class extends f{constructor(e,t,s){super(),this.operator=e,this.left=t,this.right=s}type="BinaryExpression"},v=class extends f{constructor(e,t){super(),this.operand=e,this.filter=t}type="FilterExpression"},S=class extends f{constructor(e,t,s){super(),this.operand=e,this.negate=t,this.test=s}type="TestExpression"},A=class extends 
f{constructor(e,t){super(),this.operator=e,this.argument=t}type="UnaryExpression"},L=class extends f{constructor(e=void 0,t=void 0,s=void 0){super(),this.start=e,this.stop=t,this.step=s}type="SliceExpression"},E=class extends f{constructor(e,t){super(),this.key=e,this.value=t}type="KeywordArgumentExpression"};function z(e){const t=new h([]);let s=0;function n(t,o){const n=e[s++];if(!n||n.type!==t)throw new Error(`Parser Error: ${o}. ${n.type} !== ${t}.`);return n}function r(){switch(e[s].type){case o.Text:return new b(n(o.Text,"Expected text token").value);case o.OpenStatement:return function(){let t;switch(n(o.OpenStatement,"Expected opening statement token"),e[s].type){case o.Set:++s,t=l(),n(o.CloseStatement,"Expected closing statement token");break;case o.If:++s,t=c(),n(o.OpenStatement,"Expected {% token"),n(o.EndIf,"Expected endif token"),n(o.CloseStatement,"Expected %} token");break;case o.For:++s,t=function(){const e=d(!0);if(!(e instanceof w||e instanceof F))throw new SyntaxError(`Expected identifier/tuple for the loop variable, got ${e.type} instead`);n(o.In,"Expected `in` keyword following loop variable");const t=u();n(o.CloseStatement,"Expected closing statement token");const s=[];for(;a(o.OpenStatement,o.EndFor);)s.push(r());return new _(e,t,s)}(),n(o.OpenStatement,"Expected {% token"),n(o.EndFor,"Expected endfor token"),n(o.CloseStatement,"Expected %} token");break;default:throw new SyntaxError(`Unknown statement type: ${e[s].type}`)}return t}();case o.OpenExpression:return function(){n(o.OpenExpression,"Expected opening expression token");const e=u();return n(o.CloseExpression,"Expected closing expression token"),e}();default:throw new SyntaxError(`Unexpected token type: ${e[s].type}`)}}function a(...t){return s+t.length<=e.length&&t.some(((t,o)=>t!==e[s+o].type))}function i(...t){return s+t.length<=e.length&&t.every(((t,o)=>t===e[s+o].type))}function l(){const e=u();if(i(o.Equals)){++s;const t=l();return new m(e,t)}return e}function c(){const 
t=u();n(o.CloseStatement,"Expected closing statement token");const a=[],l=[];for(;e[s]?.type!==o.OpenStatement||e[s+1]?.type!==o.ElseIf&&e[s+1]?.type!==o.Else&&e[s+1]?.type!==o.EndIf;)a.push(r());if(e[s]?.type===o.OpenStatement&&e[s+1]?.type!==o.EndIf)if(++s,i(o.ElseIf))n(o.ElseIf,"Expected elseif token"),l.push(c());else for(n(o.Else,"Expected else token"),n(o.CloseStatement,"Expected closing statement token");e[s]?.type!==o.OpenStatement||e[s+1]?.type!==o.EndIf;)l.push(r());return new p(t,a,l)}function d(e=!1){const t=e?q:u,n=[t()],r=i(o.Comma);for(;r&&(++s,n.push(t()),i(o.Comma)););return r?new F(n):n[0]}function u(){return function(){const e=f();if(i(o.If)){++s;const t=f();n(o.Else,"Expected else token");const r=f();return new p(t,[e],[r])}return e}()}function f(){let t=T();for(;i(o.Or);){const o=e[s];++s;const n=T();t=new P(o,t,n)}return t}function T(){let t=z();for(;i(o.And);){const o=e[s];++s;const n=z();t=new P(o,t,n)}return t}function z(){let t;for(;i(o.Not);){const o=e[s];++s;const n=z();t=new A(o,n)}return t??function(){let t=B();for(;i(o.ComparisonBinaryOperator)||i(o.In)||i(o.NotIn);){const o=e[s];++s;const n=B();t=new P(o,t,n)}return t}()}function B(){let t=N();for(;i(o.AdditiveBinaryOperator);){const o=e[s];++s;const n=N();t=new P(o,t,n)}return t}function I(){const t=function(){let t=q();for(;i(o.Dot)||i(o.OpenSquareBracket);){const r=e[s];let a;++s;const i=r.type!==o.Dot;if(i)a=D(),n(o.CloseSquareBracket,"Expected closing square bracket");else if(a=q(),"Identifier"!==a.type)throw new SyntaxError("Expected identifier following dot operator");t=new g(t,a,i)}return t}();return i(o.OpenParen)?O(t):t}function O(e){let t=new M(e,function(){n(o.OpenParen,"Expected opening parenthesis for arguments list");const e=function(){const e=[];for(;!i(o.CloseParen);){let t=u();if(i(o.Equals)){if(++s,!(t instanceof w))throw new SyntaxError("Expected identifier for keyword argument");const e=u();t=new E(t,e)}e.push(t),i(o.Comma)&&++s}return e}();return 
n(o.CloseParen,"Expected closing parenthesis for arguments list"),e}());return i(o.OpenParen)&&(t=O(t)),t}function D(){const e=[];let t=!1;for(;!i(o.CloseSquareBracket);)i(o.Colon)?(e.push(void 0),++s,t=!0):(e.push(u()),i(o.Colon)&&(++s,t=!0));if(0===e.length)throw new SyntaxError("Expected at least one argument for member/slice expression");if(t){if(e.length>3)throw new SyntaxError("Expected 0-3 arguments for slice expression");return new L(...e)}return e[0]}function N(){let t=V();for(;i(o.MultiplicativeBinaryOperator);){const o=e[s];++s;const n=V();t=new P(o,t,n)}return t}function V(){let e=function(){let e=I();for(;i(o.Pipe);){++s;let t=q();if(!(t instanceof w))throw new SyntaxError("Expected identifier for the filter");i(o.OpenParen)&&(t=O(t)),e=new v(e,t)}return e}();for(;i(o.Is);){++s;const t=i(o.Not);t&&++s;let n=q();if(n instanceof x&&(n=new w(n.value.toString())),!(n instanceof w))throw new SyntaxError("Expected identifier for the test");e=new S(e,t,n)}return e}function q(){const t=e[s];switch(t.type){case o.NumericLiteral:return++s,new k(Number(t.value));case o.StringLiteral:return++s,new b(t.value);case o.BooleanLiteral:return++s,new x("true"===t.value);case o.Identifier:return++s,new w(t.value);case o.OpenParen:{++s;const t=d();if(e[s].type!==o.CloseParen)throw new SyntaxError(`Expected closing parenthesis, got ${e[s].type} instead`);return++s,t}case o.OpenSquareBracket:{++s;const e=[];for(;!i(o.CloseSquareBracket);)e.push(u()),i(o.Comma)&&++s;return++s,new y(e)}case o.OpenCurlyBracket:{++s;const e=new Map;for(;!i(o.CloseCurlyBracket);){const t=u();n(o.Colon,"Expected colon between key and value in object literal");const r=u();e.set(t,r),i(o.Comma)&&++s}return++s,new C(e)}default:throw new SyntaxError(`Unexpected token: ${t.type}`)}}for(;s<e.length;)t.body.push(r());return t}function B(e,t,s=1){void 0===t&&(t=e,e=0);const o=[];for(let n=e;n<t;n+=s)o.push(n);return o}function I(e,t,s,o=1){const 
n=Math.sign(o);n>=0?(t=(t??=0)<0?Math.max(e.length+t,0):Math.min(t,e.length),s=(s??=e.length)<0?Math.max(e.length+s,0):Math.min(s,e.length)):(t=(t??=e.length-1)<0?Math.max(e.length+t,-1):Math.min(t,e.length-1),s=(s??=-1)<-1?Math.max(e.length+s,-1):Math.min(s,e.length-1));const r=[];for(let a=t;n*a<n*s;a+=o)r.push(e[a]);return r}function O(e){return e.replace(/\b\w/g,(e=>e.toUpperCase()))}var D=class{type="RuntimeValue";value;builtins=new Map;constructor(e=void 0){this.value=e}__bool__(){return new q(!!this.value)}},N=class extends D{type="NumericValue"},V=class extends D{type="StringValue";builtins=new Map([["upper",new W((()=>new V(this.value.toUpperCase())))],["lower",new W((()=>new V(this.value.toLowerCase())))],["strip",new W((()=>new V(this.value.trim())))],["title",new W((()=>new V(O(this.value))))],["length",new N(this.value.length)]])},q=class extends D{type="BooleanValue"},j=class extends D{type="ObjectValue";__bool__(){return new q(this.value.size>0)}builtins=new Map([["get",new W((([e,t])=>{if(!(e instanceof V))throw new Error(`Object key must be a string: got ${e.type}`);return this.value.get(e.value)??t??new $}))],["items",new W((()=>new R(Array.from(this.value.entries()).map((([e,t])=>new R([new V(e),t]))))))]])},R=class extends D{type="ArrayValue";builtins=new Map([["length",new N(this.value.length)]]);__bool__(){return new q(this.value.length>0)}},G=class extends R{type="TupleValue"},W=class extends D{type="FunctionValue"},$=class extends D{type="NullValue"},U=class extends D{type="UndefinedValue"},X=class{constructor(e){this.parent=e}variables=new Map([["namespace",new W((e=>{if(0===e.length)return new j(new Map);if(1!==e.length||!(e[0]instanceof j))throw new Error("`namespace` expects either zero arguments or a single object argument");return e[0]}))]]);tests=new Map([["boolean",e=>"BooleanValue"===e.type],["callable",e=>e instanceof W],["odd",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "odd" to type: ${e.type}`);return 
e.value%2!=0}],["even",e=>{if("NumericValue"!==e.type)throw new Error(`Cannot apply test "even" to type: ${e.type}`);return e.value%2==0}],["false",e=>"BooleanValue"===e.type&&!e.value],["true",e=>"BooleanValue"===e.type&&e.value],["number",e=>"NumericValue"===e.type],["integer",e=>"NumericValue"===e.type&&Number.isInteger(e.value)],["iterable",e=>e instanceof R||e instanceof V],["lower",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toLowerCase()}],["upper",e=>{const t=e.value;return"StringValue"===e.type&&t===t.toUpperCase()}],["none",e=>"NullValue"===e.type],["defined",e=>"UndefinedValue"!==e.type],["undefined",e=>"UndefinedValue"===e.type],["equalto",(e,t)=>e.value===t.value]]);set(e,t){return this.declareVariable(e,H(t))}declareVariable(e,t){if(this.variables.has(e))throw new SyntaxError(`Variable already declared: ${e}`);return this.variables.set(e,t),t}setVariable(e,t){return this.variables.set(e,t),t}resolve(e){if(this.variables.has(e))return this;if(this.parent)return this.parent.resolve(e);throw new Error(`Unknown variable: ${e}`)}lookupVariable(e){try{return this.resolve(e).variables.get(e)??new U}catch{return new U}}},Q=class{global;constructor(e){this.global=e??new X}run(e){return this.evaluate(e,this.global)}evaluateBinaryExpression(e,t){const s=this.evaluate(e.left,t);switch(e.operator.value){case"and":return s.__bool__().value?this.evaluate(e.right,t):s;case"or":return s.__bool__().value?s:this.evaluate(e.right,t)}const o=this.evaluate(e.right,t);switch(e.operator.value){case"==":return new q(s.value==o.value);case"!=":return new q(s.value!=o.value)}if(s instanceof U||o instanceof U)throw new Error("Cannot perform operation on undefined values");if(s instanceof $||o instanceof $)throw new Error("Cannot perform operation on null values");if(s instanceof N&&o instanceof N)switch(e.operator.value){case"+":return new N(s.value+o.value);case"-":return new N(s.value-o.value);case"*":return new N(s.value*o.value);case"/":return new 
N(s.value/o.value);case"%":return new N(s.value%o.value);case"<":return new q(s.value<o.value);case">":return new q(s.value>o.value);case">=":return new q(s.value>=o.value);case"<=":return new q(s.value<=o.value)}else if(s instanceof R&&o instanceof R){if("+"===e.operator.value)return new R(s.value.concat(o.value))}else if(o instanceof R){const t=void 0!==o.value.find((e=>e.value===s.value));switch(e.operator.value){case"in":return new q(t);case"not in":return new q(!t)}}if((s instanceof V||o instanceof V)&&"+"===e.operator.value)return new V(s.value.toString()+o.value.toString());if(s instanceof V&&o instanceof V)switch(e.operator.value){case"in":return new q(o.value.includes(s.value));case"not in":return new q(!o.value.includes(s.value))}if(s instanceof V&&o instanceof j)switch(e.operator.value){case"in":return new q(o.value.has(s.value));case"not in":return new q(!o.value.has(s.value))}throw new SyntaxError(`Unknown operator "${e.operator.value}" between ${s.type} and ${o.type}`)}evaluateFilterExpression(e,t){const s=this.evaluate(e.operand,t);if("Identifier"===e.filter.type){const t=e.filter;if(s instanceof R)switch(t.value){case"list":return s;case"first":return s.value[0];case"last":return s.value[s.value.length-1];case"length":return new N(s.value.length);case"reverse":return new R(s.value.reverse());case"sort":return new R(s.value.sort(((e,t)=>{if(e.type!==t.type)throw new Error(`Cannot compare different types: ${e.type} and ${t.type}`);switch(e.type){case"NumericValue":return e.value-t.value;case"StringValue":return e.value.localeCompare(t.value);default:throw new Error(`Cannot compare type: ${e.type}`)}})));default:throw new Error(`Unknown ArrayValue filter: ${t.value}`)}else if(s instanceof V)switch(t.value){case"length":return new N(s.value.length);case"upper":return new V(s.value.toUpperCase());case"lower":return new V(s.value.toLowerCase());case"title":return new V(O(s.value));case"capitalize":return new 
V(s.value.charAt(0).toUpperCase()+s.value.slice(1));case"trim":return new V(s.value.trim());default:throw new Error(`Unknown StringValue filter: ${t.value}`)}else{if(s instanceof N){if("abs"===t.value)return new N(Math.abs(s.value));throw new Error(`Unknown NumericValue filter: ${t.value}`)}if(s instanceof j)switch(t.value){case"items":return new R(Array.from(s.value.entries()).map((([e,t])=>new R([new V(e),t]))));case"length":return new N(s.value.size);default:throw new Error(`Unknown ObjectValue filter: ${t.value}`)}}throw new Error(`Cannot apply filter "${t.value}" to type: ${s.type}`)}if("CallExpression"===e.filter.type){const o=e.filter;if("Identifier"!==o.callee.type)throw new Error(`Unknown filter: ${o.callee.type}`);const n=o.callee.value;if(s instanceof R){if("selectattr"===n){if(s.value.some((e=>!(e instanceof j))))throw new Error("`selectattr` can only be applied to array of objects");if(o.args.some((e=>"StringLiteral"!==e.type)))throw new Error("arguments of `selectattr` must be strings");const[e,n,r]=o.args.map((e=>this.evaluate(e,t)));let a;if(n){const e=t.tests.get(n.value);if(!e)throw new Error(`Unknown test: ${n.value}`);a=e}else a=(...e)=>e[0].__bool__().value;const i=s.value.filter((t=>{const s=t.value.get(e.value);return!!s&&a(s,r)}));return new R(i)}throw new Error(`Unknown ArrayValue filter: ${n}`)}throw new Error(`Cannot apply filter "${n}" to type: ${s.type}`)}throw new Error(`Unknown filter: ${e.filter.type}`)}evaluateTestExpression(e,t){const s=this.evaluate(e.operand,t),o=t.tests.get(e.test.value);if(!o)throw new Error(`Unknown test: ${e.test.value}`);const n=o(s);return new q(e.negate?!n:n)}evaluateUnaryExpression(e,t){const s=this.evaluate(e.argument,t);if("not"===e.operator.value)return new q(!s.value);throw new SyntaxError(`Unknown operator: ${e.operator.value}`)}evalProgram(e,t){return this.evaluateBlock(e.body,t)}evaluateBlock(e,t){let s="";for(const o of e){const 
e=this.evaluate(o,t);"NullValue"!==e.type&&"UndefinedValue"!==e.type&&(s+=e.value)}return new V(s)}evaluateIdentifier(e,t){return t.lookupVariable(e.value)}evaluateCallExpression(e,t){const s=[],o=new Map;for(const n of e.args)if("KeywordArgumentExpression"===n.type){const e=n;o.set(e.key.value,this.evaluate(e.value,t))}else s.push(this.evaluate(n,t));o.size>0&&s.push(new j(o));const n=this.evaluate(e.callee,t);if("FunctionValue"!==n.type)throw new Error(`Cannot call something that is not a function: got ${n.type}`);return n.value(s,t)}evaluateSliceExpression(e,t,s){if(!(e instanceof R||e instanceof V))throw new Error("Slice object must be an array or string");const o=this.evaluate(t.start,s),n=this.evaluate(t.stop,s),r=this.evaluate(t.step,s);if(!(o instanceof N||o instanceof U))throw new Error("Slice start must be numeric or undefined");if(!(n instanceof N||n instanceof U))throw new Error("Slice stop must be numeric or undefined");if(!(r instanceof N||r instanceof U))throw new Error("Slice step must be numeric or undefined");return e instanceof R?new R(I(e.value,o.value,n.value,r.value)):new V(I(Array.from(e.value),o.value,n.value,r.value).join(""))}evaluateMemberExpression(e,t){const s=this.evaluate(e.object,t);let o,n;if(e.computed){if("SliceExpression"===e.property.type)return this.evaluateSliceExpression(s,e.property,t);o=this.evaluate(e.property,t)}else o=new V(e.property.value);if(s instanceof j){if(!(o instanceof V))throw new Error(`Cannot access property with non-string: got ${o.type}`);n=s.value.get(o.value)??s.builtins.get(o.value)}else if(s instanceof R||s instanceof V)if(o instanceof N)n=s.value.at(o.value),s instanceof V&&(n=new V(s.value.at(o.value)));else{if(!(o instanceof V))throw new Error(`Cannot access property with non-string/non-number: got ${o.type}`);n=s.builtins.get(o.value)}else{if(!(o instanceof V))throw new Error(`Cannot access property with non-string: got ${o.type}`);n=s.builtins.get(o.value)}return n instanceof D?n:new 
U}evaluateSet(e,t){const s=this.evaluate(e.value,t);if("Identifier"===e.assignee.type){const o=e.assignee.value;t.setVariable(o,s)}else{if("MemberExpression"!==e.assignee.type)throw new Error(`Invalid LHS inside assignment expression: ${JSON.stringify(e.assignee)}`);{const o=e.assignee,n=this.evaluate(o.object,t);if(!(n instanceof j))throw new Error("Cannot assign to member of non-object");if("Identifier"!==o.property.type)throw new Error("Cannot assign to member with non-identifier property");n.value.set(o.property.value,s)}}return new $}evaluateIf(e,t){const s=this.evaluate(e.test,t);return this.evaluateBlock(s.__bool__().value?e.body:e.alternate,t)}evaluateFor(e,t){const s=new X(t),o=this.evaluate(e.iterable,s);if(!(o instanceof R))throw new Error(`Expected iterable type in for loop: got ${o.type}`);let n="";for(let t=0;t<o.value.length;++t){const r=new Map([["index",new N(t+1)],["index0",new N(t)],["revindex",new N(o.value.length-t)],["revindex0",new N(o.value.length-t-1)],["first",new q(0===t)],["last",new q(t===o.value.length-1)],["length",new N(o.value.length)],["previtem",t>0?o.value[t-1]:new U],["nextitem",t<o.value.length-1?o.value[t+1]:new U]]);s.setVariable("loop",new j(r));const a=o.value[t];if("Identifier"===e.loopvar.type)s.setVariable(e.loopvar.value,a);else if("TupleLiteral"===e.loopvar.type){const t=e.loopvar;if("ArrayValue"!==a.type)throw new Error(`Cannot unpack non-iterable type: ${a.type}`);const o=a;if(t.value.length!==o.value.length)throw new Error(`Too ${t.value.length>o.value.length?"few":"many"} items to unpack`);for(let e=0;e<t.value.length;++e){if("Identifier"!==t.value[e].type)throw new Error(`Cannot unpack non-identifier type: ${t.value[e].type}`);s.setVariable(t.value[e].value,o.value[e])}}n+=this.evaluateBlock(e.body,s).value}return new V(n)}evaluate(e,t){if(void 0===e)return new U;switch(e.type){case"Program":return this.evalProgram(e,t);case"Set":return this.evaluateSet(e,t);case"If":return this.evaluateIf(e,t);case"For":return 
this.evaluateFor(e,t);case"NumericLiteral":return new N(Number(e.value));case"StringLiteral":return new V(e.value);case"BooleanLiteral":return new q(e.value);case"ArrayLiteral":return new R(e.value.map((e=>this.evaluate(e,t))));case"TupleLiteral":return new G(e.value.map((e=>this.evaluate(e,t))));case"ObjectLiteral":{const s=new Map;for(const[o,n]of e.value){const e=this.evaluate(o,t);if(!(e instanceof V))throw new Error(`Object keys must be strings: got ${e.type}`);s.set(e.value,this.evaluate(n,t))}return new j(s)}case"Identifier":return this.evaluateIdentifier(e,t);case"CallExpression":return this.evaluateCallExpression(e,t);case"MemberExpression":return this.evaluateMemberExpression(e,t);case"UnaryExpression":return this.evaluateUnaryExpression(e,t);case"BinaryExpression":return this.evaluateBinaryExpression(e,t);case"FilterExpression":return this.evaluateFilterExpression(e,t);case"TestExpression":return this.evaluateTestExpression(e,t);default:throw new SyntaxError(`Unknown node type: ${e.type}`)}}};function H(e){switch(typeof e){case"number":return new N(e);case"string":return new V(e);case"boolean":return new q(e);case"object":return null===e?new $:Array.isArray(e)?new R(e.map(H)):new j(new Map(Object.entries(e).map((([e,t])=>[e,H(t)]))));case"function":return new W(((t,s)=>H(e(...t.map((e=>e.value)))??null)));default:throw new Error(`Cannot convert to runtime value: ${e}`)}}var Y=class{parsed;constructor(e){const t=d(e,{lstrip_blocks:!0,trim_blocks:!0});this.parsed=z(t)}render(e){const t=new X;t.set("false",!1),t.set("true",!0),t.set("raise_exception",(e=>{throw new Error(e)})),t.set("range",B);for(const[s,o]of Object.entries(e))t.set(s,o);return new Q(t).run(this.parsed).value}}},"./src/backends/onnx.js":
// NOTE(review): ./src/backends/onnx.js — selects the ONNX runtime backend.
// It requires the "onnxruntime-web" module (side effects only) and then reads
// the global `ort` object into `o` — presumably a deliberate vendoring patch,
// since ort-dev.js is loaded separately; TODO confirm against the build script.
// Exports: ONNX (n, the runtime) and executionProviders (r). On Node
// ("node" === process.release.name) it uses ONNX_NODE and prepends "cpu" to the
// providers; otherwise it uses the web runtime with "wasm" only, and disables
// WASM SIMD when the UA matches iOS 16.4 WebKit (regex-sniffed workaround).
/*!******************************!*\
!*** ./src/backends/onnx.js ***!
\******************************/(e,t,s)=>{s.r(t),s.d(t,{ONNX:()=>n,executionProviders:()=>r});s(/*! onnxruntime-web */"onnxruntime-web");const o=ort;let n;const r=["wasm"];if("undefined"!=typeof process&&"node"===process?.release?.name)n=ONNX_NODE.default??ONNX_NODE,r.unshift("cpu");else{n=o.default??o;"undefined"!=typeof navigator&&/iP(hone|od|ad).+16_4.+AppleWebKit/.test(navigator.userAgent)&&(n.env.wasm.simd=!1)}},"./src/configs.js":
// NOTE(review): ./src/configs.js — exports PretrainedConfig (n) and
// AutoConfig (r). PretrainedConfig is a plain config holder (model_type=null,
// is_encoder_decoder=false, then Object.assign of the loaded JSON); its static
// from_pretrained() returns the supplied `config` if given, otherwise fetches
// "config.json" via utils/hub getModelJSON with the usual hub options
// (progress_callback, cache_dir, local_files_only, revision="main").
// AutoConfig.from_pretrained simply delegates to PretrainedConfig.
/*!************************!*\
!*** ./src/configs.js ***!
\************************/(e,t,s)=>{s.r(t),s.d(t,{AutoConfig:()=>r,PretrainedConfig:()=>n});var o=s(/*! ./utils/hub.js */"./src/utils/hub.js");class n{constructor(e){this.model_type=null,this.is_encoder_decoder=!1,Object.assign(this,e)}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:n=null,local_files_only:r=!1,revision:a="main"}={}){let i=s??await async function(e,t){return await(0,o.getModelJSON)(e,"config.json",!0,t)}(e,{progress_callback:t,config:s,cache_dir:n,local_files_only:r,revision:a});return new this(i)}}class r{static async from_pretrained(...e){return n.from_pretrained(...e)}}},"./src/env.js":
// NOTE(review): ./src/env.js — builds and exports the global `env` settings
// object (version "2.16.1"). FS/path availability is detected by checking
// whether the (possibly webpack-stubbed) fs/path modules are empty objects
// (helper M). When both are real, __dirname is derived from a hard-coded
// build-time file URL (vendoring artifact) and cacheDir/localModelPath are
// joined under it; otherwise browser defaults ("./", "/models/") apply.
// ort's wasmPaths is pointed at the local /dist/ (Node) or the jsDelivr CDN
// pinned to the same version. Browser Cache API support sets useBrowserCache.
/*!********************!*\
!*** ./src/env.js ***!
\********************/(e,t,s)=>{s.r(t),s.d(t,{env:()=>g});var o=s(/*! fs */"?569f"),n=s(/*! path */"?3f59"),r=s(/*! url */"?154a"),a=s(/*! ./backends/onnx.js */"./src/backends/onnx.js");const{env:i}=a.ONNX,l="2.16.1",c="undefined"!=typeof self&&"caches"in self,d=!M(o),u=!M(n),h=d&&u,p=h?n.dirname(n.dirname(r.fileURLToPath("file:///Users/tarekziade/Dev/mozilla-central/toolkit/components/ml/vendor/tmp/transformers.js/src/env.js"))):"./",_=h?n.join(p,"/.cache/"):null,m="/models/",f=h?n.join(p,m):m;i?.wasm&&(i.wasm.wasmPaths=h?n.join(p,"/dist/"):`https://cdn.jsdelivr.net/npm/@xenova/transformers@${l}/dist/`);const g={backends:{onnx:i,tfjs:{}},__dirname:p,version:l,allowRemoteModels:!0,remoteHost:"https://huggingface.co/",remotePathTemplate:"{model}/resolve/{revision}/",allowLocalModels:!0,localModelPath:f,useFS:d,useBrowserCache:c,useFSCache:d,cacheDir:_,useCustomCache:!1,customCache:null};function M(e){return 0===Object.keys(e).length}},"./src/models.js":
/*!***********************!*\
!*** ./src/models.js ***!
\***********************/(e,t,s)=>{s.r(t),s.d(t,{ASTForAudioClassification:()=>Kt,ASTModel:()=>Zt,ASTPreTrainedModel:()=>Jt,AlbertForMaskedLM:()=>lt,AlbertForQuestionAnswering:()=>it,AlbertForSequenceClassification:()=>at,AlbertModel:()=>rt,AlbertPreTrainedModel:()=>nt,AutoModel:()=>ta,AutoModelForAudioClassification:()=>wa,AutoModelForAudioFrameClassification:()=>ka,AutoModelForCTC:()=>Ma,AutoModelForCausalLM:()=>la,AutoModelForDepthEstimation:()=>Fa,AutoModelForDocumentQuestionAnswering:()=>ba,AutoModelForImageClassification:()=>ha,AutoModelForImageFeatureExtraction:()=>Ca,AutoModelForImageMatting:()=>xa,AutoModelForImageSegmentation:()=>pa,AutoModelForImageToImage:()=>ya,AutoModelForMaskGeneration:()=>ga,AutoModelForMaskedLM:()=>ca,AutoModelForObjectDetection:()=>ma,AutoModelForQuestionAnswering:()=>da,AutoModelForSemanticSegmentation:()=>_a,AutoModelForSeq2SeqLM:()=>na,AutoModelForSequenceClassification:()=>sa,AutoModelForSpeechSeq2Seq:()=>ra,AutoModelForTextToSpectrogram:()=>aa,AutoModelForTextToWaveform:()=>ia,AutoModelForTokenClassification:()=>oa,AutoModelForVision2Seq:()=>ua,AutoModelForXVector:()=>Ta,AutoModelForZeroShotObjectDetection:()=>fa,BartForConditionalGeneration:()=>Tt,BartForSequenceClassification:()=>kt,BartModel:()=>wt,BartPretrainedModel:()=>Mt,BaseModelOutput:()=>q,BeitForImageClassification:()=>fo,BeitModel:()=>mo,BeitPreTrainedModel:()=>_o,BertForMaskedLM:()=>G,BertForQuestionAnswering:()=>U,BertForSequenceClassification:()=>W,BertForTokenClassification:()=>$,BertModel:()=>R,BertPreTrainedModel:()=>j,BlenderbotForConditionalGeneration:()=>St,BlenderbotModel:()=>vt,BlenderbotPreTrainedModel:()=>Pt,BlenderbotSmallForConditionalGeneration:()=>Et,BlenderbotSmallModel:()=>Lt,BlenderbotSmallPreTrainedModel:()=>At,BloomForCausalLM:()=>Us,BloomModel:()=>$s,BloomPreTrainedModel:()=>Ws,CLIPModel:()=>rs,CLIPPreTrainedModel:()=>ns,CLIPSegForImageSegmentation:()=>fs,CLIPSegModel:()=>ms,CLIPSegPreTrainedModel:()=>_s,CLIPTextModelWithProjection:()=>as,CLI
PVisionModelWithProjection:()=>is,CamembertForMaskedLM:()=>me,CamembertForQuestionAnswering:()=>Me,CamembertForSequenceClassification:()=>fe,CamembertForTokenClassification:()=>ge,CamembertModel:()=>_e,CamembertPreTrainedModel:()=>pe,CausalLMOutput:()=>za,CausalLMOutputWithPast:()=>Ba,ChineseCLIPModel:()=>ps,ChineseCLIPPreTrainedModel:()=>hs,ClapAudioModelWithProjection:()=>ur,ClapModel:()=>cr,ClapPreTrainedModel:()=>lr,ClapTextModelWithProjection:()=>dr,CodeGenForCausalLM:()=>Bs,CodeGenModel:()=>zs,CodeGenPreTrainedModel:()=>Es,ConvBertForMaskedLM:()=>oe,ConvBertForQuestionAnswering:()=>ae,ConvBertForSequenceClassification:()=>ne,ConvBertForTokenClassification:()=>re,ConvBertModel:()=>se,ConvBertPreTrainedModel:()=>te,ConvNextForImageClassification:()=>Jo,ConvNextModel:()=>Yo,ConvNextPreTrainedModel:()=>Ho,ConvNextV2ForImageClassification:()=>en,ConvNextV2Model:()=>Ko,ConvNextV2PreTrainedModel:()=>Zo,DPTForDepthEstimation:()=>jo,DPTModel:()=>qo,DPTPreTrainedModel:()=>Vo,DebertaForMaskedLM:()=>ke,DebertaForQuestionAnswering:()=>ye,DebertaForSequenceClassification:()=>be,DebertaForTokenClassification:()=>xe,DebertaModel:()=>Te,DebertaPreTrainedModel:()=>we,DebertaV2ForMaskedLM:()=>Pe,DebertaV2ForQuestionAnswering:()=>Ae,DebertaV2ForSequenceClassification:()=>ve,DebertaV2ForTokenClassification:()=>Se,DebertaV2Model:()=>Ce,DebertaV2PreTrainedModel:()=>Fe,DeiTForImageClassification:()=>So,DeiTModel:()=>vo,DeiTPreTrainedModel:()=>Po,DepthAnythingForDepthEstimation:()=>Go,DepthAnythingPreTrainedModel:()=>Ro,DetrForObjectDetection:()=>wo,DetrForSegmentation:()=>To,DetrModel:()=>Mo,DetrObjectDetectionOutput:()=>ko,DetrPreTrainedModel:()=>go,DetrSegmentationOutput:()=>bo,Dinov2ForImageClassification:()=>on,Dinov2Model:()=>sn,Dinov2PreTrainedModel:()=>tn,DistilBertForMaskedLM:()=>Oe,DistilBertForQuestionAnswering:()=>Ie,DistilBertForSequenceClassification:()=>ze,DistilBertForTokenClassification:()=>Be,DistilBertModel:()=>Ee,DistilBertPreTrainedModel:()=>Le,DonutSwinModel:()=>
Qo,DonutSwinPreTrainedModel:()=>Xo,EfficientNetForImageClassification:()=>xr,EfficientNetModel:()=>br,EfficientNetPreTrainedModel:()=>kr,ElectraForMaskedLM:()=>ce,ElectraForQuestionAnswering:()=>he,ElectraForSequenceClassification:()=>de,ElectraForTokenClassification:()=>ue,ElectraModel:()=>le,ElectraPreTrainedModel:()=>ie,EsmForMaskedLM:()=>Ve,EsmForSequenceClassification:()=>qe,EsmForTokenClassification:()=>je,EsmModel:()=>Ne,EsmPreTrainedModel:()=>De,FalconForCausalLM:()=>ir,FalconModel:()=>ar,FalconPreTrainedModel:()=>rr,GLPNForDepthEstimation:()=>Uo,GLPNModel:()=>$o,GLPNPreTrainedModel:()=>Wo,GPT2LMHeadModel:()=>ws,GPT2Model:()=>Ms,GPT2PreTrainedModel:()=>gs,GPTBigCodeForCausalLM:()=>Ls,GPTBigCodeModel:()=>As,GPTBigCodePreTrainedModel:()=>Ss,GPTJForCausalLM:()=>vs,GPTJModel:()=>Ps,GPTJPreTrainedModel:()=>Cs,GPTNeoForCausalLM:()=>bs,GPTNeoModel:()=>ks,GPTNeoPreTrainedModel:()=>Ts,GPTNeoXForCausalLM:()=>Fs,GPTNeoXModel:()=>ys,GPTNeoXPreTrainedModel:()=>xs,HubertForCTC:()=>Nn,HubertForSequenceClassification:()=>Vn,HubertModel:()=>Dn,HubertPreTrainedModel:()=>On,ImageMattingOutput:()=>Ia,LlamaForCausalLM:()=>Ds,LlamaModel:()=>Os,LlamaPreTrainedModel:()=>Is,LongT5ForConditionalGeneration:()=>_t,LongT5Model:()=>pt,LongT5PreTrainedModel:()=>ht,M2M100ForConditionalGeneration:()=>gn,M2M100Model:()=>fn,M2M100PreTrainedModel:()=>mn,MBartForCausalLM:()=>Ct,MBartForConditionalGeneration:()=>yt,MBartForSequenceClassification:()=>Ft,MBartModel:()=>xt,MBartPreTrainedModel:()=>bt,MPNetForMaskedLM:()=>He,MPNetForQuestionAnswering:()=>Ze,MPNetForSequenceClassification:()=>Ye,MPNetForTokenClassification:()=>Je,MPNetModel:()=>Qe,MPNetPreTrainedModel:()=>Xe,MT5ForConditionalGeneration:()=>gt,MT5Model:()=>ft,MT5PreTrainedModel:()=>mt,MarianMTModel:()=>_n,MarianModel:()=>pn,MarianPreTrainedModel:()=>hn,MaskedLMOutput:()=>La,MistralForCausalLM:()=>tr,MistralModel:()=>er,MistralPreTrainedModel:()=>Kn,MobileBertForMaskedLM:()=>We,MobileBertForQuestionAnswering:()=>Ue,MobileBertForSequenc
eClassification:()=>$e,MobileBertModel:()=>Ge,MobileBertPreTrainedModel:()=>Re,MobileViTForImageClassification:()=>ao,MobileViTModel:()=>ro,MobileViTPreTrainedModel:()=>no,ModelOutput:()=>V,MptForCausalLM:()=>Hs,MptModel:()=>Qs,MptPreTrainedModel:()=>Xs,NomicBertModel:()=>Q,NomicBertPreTrainedModel:()=>X,OPTForCausalLM:()=>Zs,OPTModel:()=>Js,OPTPreTrainedModel:()=>Ys,OwlViTForObjectDetection:()=>co,OwlViTModel:()=>lo,OwlViTPreTrainedModel:()=>io,Owlv2ForObjectDetection:()=>po,Owlv2Model:()=>ho,Owlv2PreTrainedModel:()=>uo,PhiForCausalLM:()=>Gs,PhiModel:()=>Rs,PhiPreTrainedModel:()=>js,PreTrainedModel:()=>N,PretrainedMixin:()=>yr,QuestionAnsweringModelOutput:()=>Ea,Qwen2ForCausalLM:()=>qs,Qwen2Model:()=>Vs,Qwen2PreTrainedModel:()=>Ns,ResNetForImageClassification:()=>Eo,ResNetModel:()=>Lo,ResNetPreTrainedModel:()=>Ao,RoFormerForMaskedLM:()=>J,RoFormerForQuestionAnswering:()=>ee,RoFormerForSequenceClassification:()=>Z,RoFormerForTokenClassification:()=>K,RoFormerModel:()=>Y,RoFormerPreTrainedModel:()=>H,RobertaForMaskedLM:()=>It,RobertaForQuestionAnswering:()=>Nt,RobertaForSequenceClassification:()=>Ot,RobertaForTokenClassification:()=>Dt,RobertaModel:()=>Bt,RobertaPreTrainedModel:()=>zt,SamImageSegmentationOutput:()=>un,SamModel:()=>dn,SamPreTrainedModel:()=>cn,SegformerForImageClassification:()=>fr,SegformerForSemanticSegmentation:()=>gr,SegformerModel:()=>mr,SegformerPreTrainedModel:()=>_r,Seq2SeqLMOutput:()=>Pa,SequenceClassifierOutput:()=>va,SiglipModel:()=>cs,SiglipPreTrainedModel:()=>ls,SiglipTextModel:()=>ds,SiglipVisionModel:()=>us,SpeechT5ForSpeechToText:()=>Qn,SpeechT5ForTextToSpeech:()=>Hn,SpeechT5HifiGan:()=>Yn,SpeechT5Model:()=>Xn,SpeechT5PreTrainedModel:()=>Un,SqueezeBertForMaskedLM:()=>tt,SqueezeBertForQuestionAnswering:()=>ot,SqueezeBertForSequenceClassification:()=>st,SqueezeBertModel:()=>et,SqueezeBertPreTrainedModel:()=>Ke,StableLmForCausalLM:()=>Tr,StableLmModel:()=>wr,StableLmPreTrainedModel:()=>Mr,Starcoder2ForCausalLM:()=>nr,Starcoder2Model:()=>o
r,Starcoder2PreTrainedModel:()=>sr,Swin2SRForImageSuperResolution:()=>No,Swin2SRModel:()=>Do,Swin2SRPreTrainedModel:()=>Oo,SwinForImageClassification:()=>Io,SwinModel:()=>Bo,SwinPreTrainedModel:()=>zo,T5ForConditionalGeneration:()=>ut,T5Model:()=>dt,T5PreTrainedModel:()=>ct,TableTransformerForObjectDetection:()=>Fo,TableTransformerModel:()=>yo,TableTransformerObjectDetectionOutput:()=>Co,TableTransformerPreTrainedModel:()=>xo,TokenClassifierOutput:()=>Aa,TrOCRForCausalLM:()=>Zn,TrOCRPreTrainedModel:()=>Jn,UniSpeechForCTC:()=>Fn,UniSpeechForSequenceClassification:()=>Cn,UniSpeechModel:()=>yn,UniSpeechPreTrainedModel:()=>xn,UniSpeechSatForAudioFrameClassification:()=>Ln,UniSpeechSatForCTC:()=>Sn,UniSpeechSatForSequenceClassification:()=>An,UniSpeechSatModel:()=>vn,UniSpeechSatPreTrainedModel:()=>Pn,ViTForImageClassification:()=>to,ViTModel:()=>eo,ViTPreTrainedModel:()=>Ks,VisionEncoderDecoderModel:()=>os,VitMatteForImageMatting:()=>oo,VitMattePreTrainedModel:()=>so,VitsModel:()=>pr,VitsModelOutput:()=>Oa,VitsPreTrainedModel:()=>hr,Wav2Vec2BertForCTC:()=>Bn,Wav2Vec2BertForSequenceClassification:()=>In,Wav2Vec2BertModel:()=>zn,Wav2Vec2BertPreTrainedModel:()=>En,Wav2Vec2ForAudioFrameClassification:()=>bn,Wav2Vec2ForCTC:()=>Tn,Wav2Vec2ForSequenceClassification:()=>kn,Wav2Vec2Model:()=>wn,Wav2Vec2PreTrainedModel:()=>Mn,WavLMForAudioFrameClassification:()=>$n,WavLMForCTC:()=>Rn,WavLMForSequenceClassification:()=>Gn,WavLMForXVector:()=>Wn,WavLMModel:()=>jn,WavLMPreTrainedModel:()=>qn,WhisperForConditionalGeneration:()=>ss,WhisperModel:()=>ts,WhisperPreTrainedModel:()=>es,XLMForQuestionAnswering:()=>Wt,XLMForSequenceClassification:()=>Rt,XLMForTokenClassification:()=>Gt,XLMModel:()=>qt,XLMPreTrainedModel:()=>Vt,XLMRobertaForMaskedLM:()=>Xt,XLMRobertaForQuestionAnswering:()=>Yt,XLMRobertaForSequenceClassification:()=>Qt,XLMRobertaForTokenClassification:()=>Ht,XLMRobertaModel:()=>Ut,XLMRobertaPreTrainedModel:()=>$t,XLMWithLMHeadModel:()=>jt,XVectorOutput:()=>Sa,YolosForObjectDe
tection:()=>an,YolosModel:()=>rn,YolosObjectDetectionOutput:()=>ln,YolosPreTrainedModel:()=>nn});var o=s(/*! ./configs.js */"./src/configs.js"),n=s(/*! ./utils/core.js */"./src/utils/core.js"),r=s(/*! ./utils/hub.js */"./src/utils/hub.js"),a=s(/*! ./utils/generation.js */"./src/utils/generation.js"),i=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),l=s(/*! ./backends/onnx.js */"./src/backends/onnx.js"),c=s(/*! ./transformers.js */"./src/transformers.js");const{InferenceSession:d,Tensor:u,env:h}=l.ONNX,p=0,_=1,m=2,f=3,g=4,M=5,w=new Map,T=new Map,k=new Map;async function b(e,t,s){let o=`onnx/${t}${s.quantized?"_quantized":""}.onnx`,n=await(0,r.getModelFile)(e,o,!0,s);try{return await d.create(n,{executionProviders:l.executionProviders})}catch(e){if(1===l.executionProviders.length&&"wasm"===l.executionProviders[0])throw e;return console.warn(e),console.warn("Something went wrong during model construction (most likely a missing operation). Using `wasm` as a fallback. "),await d.create(n,{executionProviders:["wasm"]})}}async function x(e,t){const s=function(e,t){const s=Object.create(null),o=[];for(const n of e.inputNames){const e=t[n];e instanceof i.Tensor?s[n]=h.wasm.proxy?e.clone():e:o.push(n)}if(o.length>0)throw new Error(`An error occurred during model execution: "Missing the following inputs: ${o.join(", ")}.`);const n=Object.keys(t).length,r=e.inputNames.length;if(n>r){let s=Object.keys(t).filter((t=>!e.inputNames.includes(t)));console.warn(`WARNING: Too many inputs were provided (${n} > ${r}). 
The following inputs will be ignored: "${s.join(", ")}".`)}return s}(e,t);try{let t=await e.run(s);return t=y(t),t}catch(e){throw console.error(`An error occurred during model execution: "${e}".`),console.error("Inputs given to model:",s),e}}function y(e){for(let t in e)e[t]instanceof u?e[t]=new i.Tensor(e[t]):"object"==typeof e[t]&&y(e[t]);return e}function F(e){if(e instanceof i.Tensor)return e;if(0===e.length)throw Error("items must be non-empty");if(Array.isArray(e[0])){if(e.some((t=>t.length!==e[0].length)))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=True' and/or 'truncation=True' to have batched tensors with the same length.");return new i.Tensor("int64",BigInt64Array.from(e.flat().map((e=>BigInt(e)))),[e.length,e[0].length])}return new i.Tensor("int64",BigInt64Array.from(e.map((e=>BigInt(e)))),[1,e.length])}function C(e,t){let s=e.config.pad_token_id??null,o=e.config.eos_token_id??null;(0,n.isIntegralNumber)(o)&&(o=[o]);let r=-1!==t.indexOf(s),a=null===o||!o.includes(s);if(r&&a){let e=BigInt64Array.from(t.data.map((e=>e!=s)));return new i.Tensor("int64",e,t.dims)}return(0,i.ones_like)(t)}function P(e,t,s){if(!e.inputNames.includes("position_ids"))return;const o=new BigInt64Array(t.attention_mask.data.length);for(let e=0;e<t.attention_mask.dims[0];++e){let s=e*t.attention_mask.dims[1],n=BigInt(0);for(let e=0;e<t.attention_mask.dims[1];++e){const r=s+e;0n===t.attention_mask.data[r]?o[r]=BigInt(1):(o[r]=n,n+=t.attention_mask.data[r])}}t.position_ids=new i.Tensor("int64",o,t.attention_mask.dims),s&&(t.position_ids=t.position_ids.slice(null,-1).unsqueeze_(-1))}function v(e){return new i.Tensor("bool",[e],[1])}async function S(e,t){let{encoder_outputs:s,past_key_values:o}=t;s||(s=(await z(e,t)).last_hidden_state);let n={input_ids:t.decoder_input_ids,encoder_hidden_states:s};const 
r=!!o;e.decoder_merged_session.inputNames.includes("use_cache_branch")&&(n.use_cache_branch=v(r)),e.decoder_merged_session.inputNames.includes("encoder_attention_mask")&&(n.encoder_attention_mask=t.attention_mask),P(e.decoder_merged_session,n,r),e.addPastKeyValues(n,o);const a=await x(e.decoder_merged_session,n);let i=a.logits;o=e.getPastKeyValues(a,o);const l=e.getAttentions(a);return new Pa({logits:i,past_key_values:o,encoder_outputs:s,...l})}function A(e,t,s,o){let n=[],r=0;const a=e.requires_attention_mask??!0;let l=s.decoder_input_ids??s.decoder_start_token_id??s.bos_token_id??s.eos_token_id;l instanceof i.Tensor?l=l.tolist().flat():Array.isArray(l)||(l=[l]);for(let s of t){s.dims=[1,...s.dims];let t={inputs:s,encoder_outputs:null,prev_model_outputs:null,output_token_ids:l,done:!1,score:0,id:r++};a&&(t.attention_mask=C(e,s)),n.push(t)}return n}async function L(e,t){const s=e.main_input_name;let o=t.output_token_ids;t.prev_model_outputs&&(o=o.slice(-1));let n={[s]:t.inputs,decoder_input_ids:F(o),encoder_outputs:t.encoder_outputs,past_key_values:t.prev_model_outputs?.past_key_values};t.attention_mask&&(n.attention_mask=t.attention_mask);let r=await e.forward(n);return t.prev_model_outputs=r,t.encoder_outputs=r.encoder_outputs,r}function E(e,t){e.output_token_ids=[...e.output_token_ids,t]}async function z(e,t){const s=Object.create(null);for(const o of e.session.inputNames)s[o]=t[o];return e.session.inputNames.includes("token_type_ids")&&!s.token_type_ids&&(s.token_type_ids=new i.Tensor("int64",new BigInt64Array(s.input_ids.data.length),s.input_ids.dims)),await x(e.session,s)}async function B(e,t){let{input_ids:s,past_key_values:o,attention_mask:n}=t,r={input_ids:s,attention_mask:n??C(e,s)};const a=!!o;e.session.inputNames.includes("use_cache_branch")&&(r.use_cache_branch=v(a)),P(e.session,r,a),e.addPastKeyValues(r,o);let i=await x(e.session,r),l=i.logits;return o=e.getPastKeyValues(i,o),{logits:l,past_key_values:o}}function I(e,t,s,o,n){let r=[],a=0;for(let s of 
t){let t,i=s.tolist().map(Number);s.dims=[1,...s.dims],n?(t=n[a],t.dims=[1,...t.dims]):t=C(e,s);let l={input:s,model_input_ids:s,attention_mask:t,prev_model_outputs:null,output_token_ids:i,num_output_tokens:o,done:!1,score:0,id:a++};r.push(l)}return r}async function O(e,t){let s=new BigInt64Array(t.output_token_ids.length).fill(1n),o={input_ids:t.model_input_ids,attention_mask:new i.Tensor("int64",s,[1,s.length]),past_key_values:t.prev_model_outputs?.past_key_values},n=await e.forward(o);return t.prev_model_outputs=n,n}function D(e,t){e.output_token_ids=[...e.output_token_ids,t],e.model_input_ids=new i.Tensor("int64",[BigInt(t)],[1,1])}class N extends n.Callable{main_input_name="input_ids";constructor(e,t){super(),this.config=e,this.session=t;const s=k.get(this.constructor),o=w.get(s);this.can_generate=!1,this._runBeam=null,this._getStartBeams=null,this._updateBeam=null,this._forward=null,o===g?(this.can_generate=!0,this._runBeam=O,this._getStartBeams=I,this._updateBeam=D,this._forward=B):o===m||o===f?(this.can_generate=!0,this._runBeam=L,this._getStartBeams=A,this._updateBeam=E,this._forward=S):this._forward=z}async dispose(){const e=[];for(let t of Object.keys(this)){const s=this[t];s instanceof d&&e.push(s.handler.dispose())}return await Promise.all(e)}static async from_pretrained(e,{quantized:t=!0,progress_callback:s=null,config:n=null,cache_dir:a=null,local_files_only:i=!1,revision:l="main",model_file_name:c=null}={}){let d={quantized:t,progress_callback:s,config:n,cache_dir:a,local_files_only:i,revision:l,model_file_name:c};const u=k.get(this),h=w.get(u);let T;return h===g?T=await Promise.all([o.AutoConfig.from_pretrained(e,d),b(e,d.model_file_name??"decoder_model_merged",d),(0,r.getModelJSON)(e,"generation_config.json",!1,d)]):h===m||h===f?T=await Promise.all([o.AutoConfig.from_pretrained(e,d),b(e,"encoder_model",d),b(e,"decoder_model_merged",d),(0,r.getModelJSON)(e,"generation_config.json",!1,d)]):h===M?T=await 
Promise.all([o.AutoConfig.from_pretrained(e,d),b(e,"vision_encoder",d),b(e,"prompt_encoder_mask_decoder",d)]):h===_?T=await Promise.all([o.AutoConfig.from_pretrained(e,d),b(e,"encoder_model",d),b(e,"decoder_model_merged",d)]):(h!==p&&console.warn(`Model type for '${u??n?.model_type}' not found, assuming encoder-only architecture. Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`),T=await Promise.all([o.AutoConfig.from_pretrained(e,d),b(e,d.model_file_name??"model",d)])),new this(...T)}async _call(e){return await this.forward(e)}async forward(e){return await this._forward(this,e)}_get_logits_processor(e,t,s=null){const o=new a.LogitsProcessorList;if(null!==e.repetition_penalty&&1!==e.repetition_penalty&&o.push(new a.RepetitionPenaltyLogitsProcessor(e.repetition_penalty)),null!==e.no_repeat_ngram_size&&e.no_repeat_ngram_size>0&&o.push(new a.NoRepeatNGramLogitsProcessor(e.no_repeat_ngram_size)),null!==e.bad_words_ids&&o.push(new a.NoBadWordsLogitsProcessor(e.bad_words_ids,e.eos_token_id)),null!==e.min_length&&null!==e.eos_token_id&&e.min_length>0&&o.push(new a.MinLengthLogitsProcessor(e.min_length,e.eos_token_id)),null!==e.min_new_tokens&&null!==e.eos_token_id&&e.min_new_tokens>0&&o.push(new a.MinNewTokensLengthLogitsProcessor(t,e.min_new_tokens,e.eos_token_id)),null!==e.forced_bos_token_id&&o.push(new a.ForcedBOSTokenLogitsProcessor(e.forced_bos_token_id)),null!==e.forced_eos_token_id&&o.push(new a.ForcedEOSTokenLogitsProcessor(e.max_length,e.forced_eos_token_id)),null!==e.begin_suppress_tokens){let s=t>1||null===e.forced_bos_token_id?t:t+1;null!==e.forced_decoder_ids&&(s+=e.forced_decoder_ids[e.forced_decoder_ids.length-1][0]),o.push(new a.SuppressTokensAtBeginLogitsProcessor(e.begin_suppress_tokens,s))}return null!==e.forced_decoder_ids&&o.push(new a.ForceTokensLogitsProcessor(e.forced_decoder_ids)),null!==s&&o.extend(s),o}_get_generation_config(e){let t=new a.GenerationConfig(this.config);return"generation_config"in 
this&&Object.assign(t,this.generation_config),null!==e&&Object.assign(t,e),t}async generate(e,t=null,s=null,{inputs_attention_mask:o=null}={}){if(!this.can_generate){let e=`The current model class (${k.get(this.constructor)}) is not compatible with \`.generate()\`, as it doesn't have a language model head.`;const t=this.config.model_type,s=Br.get(t)??zr.get(t)??vr.get(t)??Dr.get(t);throw s&&(e+=` Please use the following class instead: '${s[0]}'`),Error(e)}if(!(e instanceof i.Tensor||(0,n.isTypedArray)(e)||Array.isArray(e)))throw Error(`\`inputs\` must be a Tensor, TypedArray, or Array, but is "${e.constructor.name}".`);let r;if(this.config.is_encoder_decoder)r=0;else if(r=e instanceof i.Tensor?e.dims.at(-1):e.length,0===r)throw Error("Must supply a non-empty array of input token ids.");t=this._get_generation_config(t),s=s??new a.LogitsProcessorList,s=this._get_logits_processor(t,r,s);let l=t.eos_token_id;null===l||Array.isArray(l)||(l=[l]);let c=1;const d=c+(t.max_new_tokens??1/0),u=Number.isInteger(t.max_length)&&null===(t.max_new_tokens??null);let h=a.Sampler.getSampler(t),p=this.getStartBeams(e,t,c,o);for(;p.some((e=>!e.done))&&c<d;){let e=[];for(let o of p){if(o.done){e.push(o);continue}if(u&&o.output_token_ids.length>=t.max_length){o.done=!0,e.push(o);continue}let n=await this.runBeam(o);t.output_attentions&&this.addAttentionsToBeam(o,n),t.output_scores;let r=n.logits.slice(null,-1,null);s(o.output_token_ids,r);let a=h(r);for(let[t,s]of a){let n={...o};this.updateBeam(n,t),n.score+=s,l&&l.includes(t)&&(n.done=!0),e.push(n)}}++c,e=this.groupBeams(e).map((e=>e.sort(((e,t)=>t.score-e.score)).slice(0,t.num_beams))),p=e.flat(),t.callback_function&&t.callback_function(p)}const _=this.groupBeams(p),m=e=>_.map((s=>t.num_return_sequences>1?s.slice(0,t.num_return_sequences).map((t=>t[e])):[s[0][e]])).flat(),f=m("output_token_ids");if(t.return_dict_in_generate){return{sequences:f,decoder_attentions:m("decoder_attentions"),cross_attentions:m("cross_attentions")}}return 
f}addAttentionsToBeam(e,t){if(this.config.is_encoder_decoder){if(!t.cross_attentions||0===t.cross_attentions.length)throw Error("`output_attentions` is true, but the model did not produce cross-attentions. This is most likely because the model was not exported with `output_attentions=True`.");e.cross_attentions||(e.cross_attentions=[]),e.cross_attentions.push(t.cross_attentions)}if(!t.decoder_attentions||0===t.decoder_attentions.length)throw Error("`output_attentions` is true, but the model did not produce decoder-attentions. This is most likely because the model was not exported with `output_attentions=True`.");e.decoder_attentions||(e.decoder_attentions=[]),e.decoder_attentions.push(t.decoder_attentions)}groupBeams(e){const t=Object.create(null);for(const s of e)void 0===t[s.id]?t[s.id]=[s]:t[s.id].push(s);return Object.values(t)}getPastKeyValues(e,t){const s=Object.create(null);for(const o in e)if(o.startsWith("present")){let n=o.replace("present","past_key_values");t&&o.includes("encoder")?s[n]=t[n]:s[n]=e[o]}return s}getAttentions(e){const t=Object.create(null);for(const s of["cross_attentions","decoder_attentions"]){const o=[];for(const t in e)if(t.startsWith(s)){o[t.split(".").pop()]=e[t]}t[s]=o}return t}addPastKeyValues(e,t){if(t)Object.assign(e,t);else{const t=1;if(this.config.is_encoder_decoder&&(this.add_encoder_pkv??1)){let s=[t,this.num_encoder_heads,0,this.encoder_dim_kv],o=[t,this.num_decoder_heads,0,this.decoder_dim_kv];for(let t=0;t<this.num_decoder_layers;++t)e[`past_key_values.${t}.encoder.key`]=new i.Tensor("float32",[],s),e[`past_key_values.${t}.encoder.value`]=new i.Tensor("float32",[],s),e[`past_key_values.${t}.decoder.key`]=new i.Tensor("float32",[],o),e[`past_key_values.${t}.decoder.value`]=new i.Tensor("float32",[],o)}else if("falcon"===this.config.model_type){let s=[t*this.num_heads,0,this.dim_kv];for(let t=0;t<this.num_layers;++t)e[`past_key_values.${t}.key`]=new i.Tensor("float32",[],s),e[`past_key_values.${t}.value`]=new 
i.Tensor("float32",[],s)}else if(this.config.multi_query){let s=[t*this.num_heads,0,2*this.dim_kv];for(let t=0;t<this.num_layers;++t)e[`past_key_values.${t}.key_value`]=new i.Tensor("float32",[],s)}else if("bloom"===this.config.model_type){let s=[t*this.num_heads,this.dim_kv,0],o=[t*this.num_heads,0,this.dim_kv];for(let t=0;t<this.num_layers;++t)e[`past_key_values.${t}.key`]=new i.Tensor("float32",[],s),e[`past_key_values.${t}.value`]=new i.Tensor("float32",[],o)}else{let s=[t,this.num_heads,0,this.dim_kv];for(let t=0;t<this.num_layers;++t)e[`past_key_values.${t}.key`]=new i.Tensor("float32",[],s),e[`past_key_values.${t}.value`]=new i.Tensor("float32",[],s)}}}getStartBeams(e,t,s,o){return this._getStartBeams(this,e,t,s,o)}async runBeam(e){return await this._runBeam(this,e)}updateBeam(e,t){return this._updateBeam(e,t)}}class V{}class q extends V{constructor({last_hidden_state:e,hidden_states:t=null,attentions:s=null}){super(),this.last_hidden_state=e,this.hidden_states=t,this.attentions=s}}class j extends N{}class R extends j{}class G extends j{async _call(e){return new La(await super._call(e))}}class W extends j{async _call(e){return new va(await super._call(e))}}class $ extends j{async _call(e){return new Aa(await super._call(e))}}class U extends j{async _call(e){return new Ea(await super._call(e))}}class X extends N{}class Q extends X{}class H extends N{}class Y extends H{}class J extends H{async _call(e){return new La(await super._call(e))}}class Z extends H{async _call(e){return new va(await super._call(e))}}class K extends H{async _call(e){return new Aa(await super._call(e))}}class ee extends H{async _call(e){return new Ea(await super._call(e))}}class te extends N{}class se extends te{}class oe extends te{async _call(e){return new La(await super._call(e))}}class ne extends te{async _call(e){return new va(await super._call(e))}}class re extends te{async _call(e){return new Aa(await super._call(e))}}class ae extends te{async _call(e){return new Ea(await 
super._call(e))}}class ie extends N{}class le extends ie{}class ce extends ie{async _call(e){return new La(await super._call(e))}}class de extends ie{async _call(e){return new va(await super._call(e))}}class ue extends ie{async _call(e){return new Aa(await super._call(e))}}class he extends ie{async _call(e){return new Ea(await super._call(e))}}class pe extends N{}class _e extends pe{}class me extends pe{async _call(e){return new La(await super._call(e))}}class fe extends pe{async _call(e){return new va(await super._call(e))}}class ge extends pe{async _call(e){return new Aa(await super._call(e))}}class Me extends pe{async _call(e){return new Ea(await super._call(e))}}class we extends N{}class Te extends we{}class ke extends we{async _call(e){return new La(await super._call(e))}}class be extends we{async _call(e){return new va(await super._call(e))}}class xe extends we{async _call(e){return new Aa(await super._call(e))}}class ye extends we{async _call(e){return new Ea(await super._call(e))}}class Fe extends N{}class Ce extends Fe{}class Pe extends Fe{async _call(e){return new La(await super._call(e))}}class ve extends Fe{async _call(e){return new va(await super._call(e))}}class Se extends Fe{async _call(e){return new Aa(await super._call(e))}}class Ae extends Fe{async _call(e){return new Ea(await super._call(e))}}class Le extends N{}class Ee extends Le{}class ze extends Le{async _call(e){return new va(await super._call(e))}}class Be extends Le{async _call(e){return new Aa(await super._call(e))}}class Ie extends Le{async _call(e){return new Ea(await super._call(e))}}class Oe extends Le{async _call(e){return new La(await super._call(e))}}class De extends N{}class Ne extends De{}class Ve extends De{async _call(e){return new La(await super._call(e))}}class qe extends De{async _call(e){return new va(await super._call(e))}}class je extends De{async _call(e){return new Aa(await super._call(e))}}class Re extends N{}class Ge extends Re{}class We extends Re{async 
_call(e){return new La(await super._call(e))}}class $e extends Re{async _call(e){return new va(await super._call(e))}}class Ue extends Re{async _call(e){return new Ea(await super._call(e))}}class Xe extends N{}class Qe extends Xe{}class He extends Xe{async _call(e){return new La(await super._call(e))}}class Ye extends Xe{async _call(e){return new va(await super._call(e))}}class Je extends Xe{async _call(e){return new Aa(await super._call(e))}}class Ze extends Xe{async _call(e){return new Ea(await super._call(e))}}class Ke extends N{}class et extends Ke{}class tt extends Ke{async _call(e){return new La(await super._call(e))}}class st extends Ke{async _call(e){return new va(await super._call(e))}}class ot extends Ke{async _call(e){return new Ea(await super._call(e))}}class nt extends N{}class rt extends nt{}class at extends nt{async _call(e){return new va(await super._call(e))}}class it extends nt{async _call(e){return new Ea(await super._call(e))}}class lt extends nt{async _call(e){return new La(await super._call(e))}}class ct extends N{}class dt extends ct{}class ut extends ct{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.num_decoder_layers,this.num_decoder_heads=this.config.num_heads,this.decoder_dim_kv=this.config.d_kv,this.num_encoder_layers=this.config.num_layers,this.num_encoder_heads=this.config.num_heads,this.encoder_dim_kv=this.config.d_kv}}class ht extends N{}class pt extends ht{}class _t extends ht{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.num_decoder_layers,this.num_decoder_heads=this.config.num_heads,this.decoder_dim_kv=this.config.d_kv,this.num_encoder_layers=this.config.num_layers,this.num_encoder_heads=this.config.num_heads,this.encoder_dim_kv=this.config.d_kv}}class mt extends N{}class ft extends mt{}class gt extends 
mt{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.num_decoder_layers,this.num_decoder_heads=this.config.num_heads,this.decoder_dim_kv=this.config.d_kv,this.num_encoder_layers=this.config.num_layers,this.num_encoder_heads=this.config.num_heads,this.encoder_dim_kv=this.config.d_kv}}class Mt extends N{}class wt extends Mt{}class Tt extends Mt{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class kt extends Mt{async _call(e){return new va(await super._call(e))}}class bt extends N{}class xt extends bt{}class yt extends bt{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class Ft extends bt{async _call(e){return new va(await super._call(e))}}class Ct extends bt{constructor(e,t,s){super(e,t),this.generation_config=s,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class Pt extends N{}class vt extends Pt{}class St extends 
Pt{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class At extends N{}class Lt extends At{}class Et extends At{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class zt extends N{}class Bt extends zt{}class It extends zt{async _call(e){return new La(await super._call(e))}}class Ot extends zt{async _call(e){return new va(await super._call(e))}}class Dt extends zt{async _call(e){return new Aa(await super._call(e))}}class Nt extends zt{async _call(e){return new Ea(await super._call(e))}}class Vt extends N{}class qt extends Vt{}class jt extends Vt{async _call(e){return new La(await super._call(e))}}class Rt extends Vt{async _call(e){return new va(await super._call(e))}}class Gt extends Vt{async _call(e){return new Aa(await super._call(e))}}class Wt extends Vt{async _call(e){return new Ea(await super._call(e))}}class $t extends N{}class Ut extends $t{}class Xt extends $t{async _call(e){return new La(await super._call(e))}}class Qt extends $t{async _call(e){return new va(await super._call(e))}}class Ht extends $t{async _call(e){return new Aa(await super._call(e))}}class Yt extends $t{async _call(e){return new Ea(await super._call(e))}}class Jt extends N{}class Zt extends Jt{}class Kt extends Jt{}class es extends 
N{}class ts extends es{}class ss extends es{requires_attention_mask=!1;main_input_name="input_features";constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}async generate(e,t=null,s=null){if(t=this._get_generation_config(t),t.return_timestamps??=!1,t.return_timestamps&&(s=[new a.WhisperTimeStampLogitsProcessor(t)]),t.return_token_timestamps&&(t.output_attentions=!0,t.return_dict_in_generate=!0,"translate"===t.task&&console.warn("Token-level timestamps may not be reliable for task 'translate'."),!t.alignment_heads))throw new Error("Model generation config has no `alignment_heads`, token-level timestamps not available. See https://gist.github.com/hollance/42e32852f24243b748ae6bc1f985b13a on how to add this property to the generation config.");const o=await super.generate(e,t,s);return t.return_token_timestamps&&t.alignment_heads&&(o.token_timestamps=this._extract_token_timestamps(o,t.alignment_heads,t.num_frames)),o}_extract_token_timestamps(e,t,s=null,o=.02){if(!e.cross_attentions)throw new Error("Model outputs must contain cross attentions to extract timestamps. 
This is most likely because the model was not exported with `output_attentions=True`.");let r=this.config.median_filter_width;void 0===r&&(console.warn("Model config has no `median_filter_width`, using default value of 7."),r=7);const a=e.cross_attentions.map((e=>{let o=Array.from({length:this.config.decoder_layers},((t,s)=>(0,i.cat)(e.map((e=>e[s])),2))),n=(0,i.stack)(t.map((([e,t])=>s?o[e].slice(null,t,null,[0,s]):o[e].slice(null,t))));n=n.transpose(1,0,2,3);let[a,l]=(0,i.std_mean)(n,-2,0,!0),d=n.clone();for(let e=0;e<d.dims[0];++e){let t=d[e];for(let s=0;s<t.dims[0];++s){let o=t[s];const n=a[e][s][0],i=l[e][s][0];for(let e=0;e<o.dims[0];++e){let t=o[e];for(let e=0;e<t.data.length;++e)t.data[e]=(t.data[e]-i.data[e])/n.data[e];t.data.set((0,c.medianFilter)(t.data,r))}}}return(0,i.mean)(d,1)})),l=[e.sequences.length,e.sequences[0].length],d=new i.Tensor("float32",new Float32Array(l[0]*l[1]),l);for(let e=0;e<l[0];++e){const t=a[e].neg().squeeze_(0);let[s,r]=(0,i.dynamicTimeWarping)(t),l=Array.from({length:s.length-1},((e,t)=>s[t+1]-s[t])),c=(0,n.mergeArrays)([1],l).map((e=>!!e)),u=[];for(let e=0;e<c.length;++e)c[e]&&u.push(r[e]*o);d[e].data.set(u,1)}return d}}class os extends N{main_input_name="pixel_values";constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o;const n=this.config.encoder,r=this.config.decoder,a=n.model_type;(Fr.get(a)??Cr.get(a))||console.warn(`Model type for encoder '${a}' not found, assuming encoder-only architecture. 
Please report this at https://github.com/xenova/transformers.js/issues/new/choose.`);const i=Br.get(r.model_type);if(!i)throw new Error(`Unable to construct \`VisionEncoderDecoder\` due to unsupported decoder: "${this.config.decoder.model_type}"`);const l=new(0,i[1])(r,s,o);this.add_encoder_pkv="num_decoder_layers"in l,this.add_encoder_pkv?(this.num_decoder_layers=l.num_decoder_layers,this.num_decoder_heads=l.num_decoder_heads,this.decoder_dim_kv=l.decoder_dim_kv,this.num_encoder_layers=l.num_encoder_layers,this.num_encoder_heads=l.num_encoder_heads,this.encoder_dim_kv=l.encoder_dim_kv):(this.num_layers=l.num_layers,this.num_heads=l.num_heads,this.dim_kv=l.dim_kv)}}class ns extends N{}class rs extends ns{}class as extends ns{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class is extends ns{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class ls extends N{}class cs extends ls{}class ds extends ls{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class us extends ns{static async from_pretrained(e,t={}){return t.model_file_name??="vision_model",super.from_pretrained(e,t)}}class hs extends N{}class ps extends hs{}class _s extends N{}class ms extends _s{}class fs extends _s{}class gs extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_head,this.num_layers=this.config.n_layer,this.dim_kv=this.config.n_embd/this.num_heads}}class Ms extends gs{}class ws extends gs{}class Ts extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_heads,this.num_layers=this.config.num_layers,this.dim_kv=this.config.hidden_size/this.num_heads}}class ks extends Ts{}class bs extends Ts{}class xs extends 
N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.num_heads}}class ys extends xs{}class Fs extends xs{}class Cs extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_head,this.num_layers=this.config.n_layer,this.dim_kv=this.config.n_embd/this.num_heads}}class Ps extends Cs{}class vs extends Cs{}class Ss extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_head,this.num_layers=this.config.n_layer,this.dim_kv=this.config.n_embd/this.num_heads}}class As extends Ss{}class Ls extends Ss{}class Es extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_head,this.num_layers=this.config.n_layer,this.dim_kv=this.config.n_embd/this.num_heads}}class zs extends Es{}class Bs extends Es{}class Is extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_key_value_heads??this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.config.num_attention_heads}}class Os extends Is{}class Ds extends Is{}class Ns extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_key_value_heads??this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.config.num_attention_heads}}class Vs extends Ns{}class qs extends Ns{}class js extends 
N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.num_heads}}class Rs extends js{}class Gs extends js{}class Ws extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_head,this.num_layers=this.config.n_layer,this.dim_kv=this.config.hidden_size/this.num_heads}}class $s extends Ws{}class Us extends Ws{}class Xs extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.n_heads,this.num_layers=this.config.n_layers,this.dim_kv=this.config.d_model/this.num_heads}}class Qs extends Xs{}class Hs extends Xs{}class Ys extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.num_heads}}class Js extends Ys{}class Zs extends Ys{}class Ks extends N{}class eo extends Ks{}class to extends Ks{async _call(e){return new va(await super._call(e))}}class so extends N{}class oo extends so{async _call(e){return new Ia(await super._call(e))}}class no extends N{}class ro extends no{}class ao extends no{async _call(e){return new va(await super._call(e))}}class io extends N{}class lo extends io{}class co extends io{}class uo extends N{}class ho extends uo{}class po extends uo{}class _o extends N{}class mo extends _o{}class fo extends _o{async _call(e){return new va(await super._call(e))}}class go extends N{}class Mo extends go{}class wo extends go{async _call(e){return new ko(await super._call(e))}}class To extends go{async _call(e){return new bo(await super._call(e))}}class ko extends 
V{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class bo extends V{constructor({logits:e,pred_boxes:t,pred_masks:s}){super(),this.logits=e,this.pred_boxes=t,this.pred_masks=s}}class xo extends N{}class yo extends xo{}class Fo extends xo{async _call(e){return new Co(await super._call(e))}}class Co extends ko{}class Po extends N{}class vo extends Po{}class So extends Po{async _call(e){return new va(await super._call(e))}}class Ao extends N{}class Lo extends Ao{}class Eo extends Ao{async _call(e){return new va(await super._call(e))}}class zo extends N{}class Bo extends zo{}class Io extends zo{async _call(e){return new va(await super._call(e))}}class Oo extends N{}class Do extends Oo{}class No extends Oo{}class Vo extends N{}class qo extends Vo{}class jo extends Vo{}class Ro extends N{}class Go extends Ro{}class Wo extends N{}class $o extends Wo{}class Uo extends Wo{}class Xo extends N{}class Qo extends Xo{}class Ho extends N{}class Yo extends Ho{}class Jo extends Ho{async _call(e){return new va(await super._call(e))}}class Zo extends N{}class Ko extends Zo{}class en extends Zo{async _call(e){return new va(await super._call(e))}}class tn extends N{}class sn extends tn{}class on extends tn{async _call(e){return new va(await super._call(e))}}class nn extends N{}class rn extends nn{}class an extends nn{async _call(e){return new ln(await super._call(e))}}class ln extends V{constructor({logits:e,pred_boxes:t}){super(),this.logits=e,this.pred_boxes=t}}class cn extends N{}class dn extends cn{constructor(e,t,s){super(e,t),this.prompt_encoder_mask_decoder=s}async get_image_embeddings({pixel_values:e}){return await z(this,{pixel_values:e})}async forward(e){if(e.image_embeddings&&e.image_positional_embeddings||(e={...e,...await this.get_image_embeddings(e)}),!e.input_labels){const t=e.input_points.dims.slice(0,-1),s=t.reduce(((e,t)=>e*t),1);e.input_labels=new i.Tensor("int64",new BigInt64Array(s).fill(1n),t)}return await 
x(this.prompt_encoder_mask_decoder,{input_points:e.input_points,input_labels:e.input_labels,image_embeddings:e.image_embeddings,image_positional_embeddings:e.image_positional_embeddings})}async _call(e){return new un(await super._call(e))}}class un extends V{constructor({iou_scores:e,pred_masks:t}){super(),this.iou_scores=e,this.pred_masks=t}}class hn extends N{}class pn extends hn{}class _n extends hn{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class mn extends N{}class fn extends mn{}class gn extends mn{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.d_model/this.num_encoder_heads}}class Mn extends N{}class wn extends Mn{}class Tn extends Mn{async _call(e){return new za(await super._call(e))}}class kn extends Mn{async _call(e){return new va(await super._call(e))}}class bn extends Mn{async _call(e){return new Aa(await super._call(e))}}class xn extends N{}class yn extends xn{}class Fn extends xn{async _call(e){return new za(await super._call(e))}}class Cn extends xn{async _call(e){return new va(await super._call(e))}}class Pn extends N{}class vn extends Pn{}class Sn extends Pn{async _call(e){return new za(await super._call(e))}}class An extends Pn{async _call(e){return new va(await super._call(e))}}class Ln extends Pn{async 
_call(e){return new Aa(await super._call(e))}}class En extends N{}class zn extends En{}class Bn extends En{async _call(e){return new za(await super._call(e))}}class In extends En{async _call(e){return new va(await super._call(e))}}class On extends N{}class Dn extends Mn{}class Nn extends Mn{async _call(e){return new za(await super._call(e))}}class Vn extends Mn{async _call(e){return new va(await super._call(e))}}class qn extends N{}class jn extends qn{}class Rn extends qn{async _call(e){return new za(await super._call(e))}}class Gn extends qn{async _call(e){return new va(await super._call(e))}}class Wn extends qn{async _call(e){return new Sa(await super._call(e))}}class $n extends qn{async _call(e){return new Aa(await super._call(e))}}class Un extends N{}class Xn extends Un{}class Qn extends Un{}class Hn extends Un{constructor(e,t,s,o){super(e,t),this.decoder_merged_session=s,this.generation_config=o,this.num_decoder_layers=this.config.decoder_layers,this.num_decoder_heads=this.config.decoder_attention_heads,this.decoder_dim_kv=this.config.hidden_size/this.num_decoder_heads,this.num_encoder_layers=this.config.encoder_layers,this.num_encoder_heads=this.config.encoder_attention_heads,this.encoder_dim_kv=this.config.hidden_size/this.num_encoder_heads}async generate_speech(e,t,{threshold:s=.5,minlenratio:o=0,maxlenratio:n=20,vocoder:r=null}={}){const a={input_ids:e},{encoder_outputs:l,encoder_attention_mask:c}=await z(this,a),d=l.dims[1]/this.config.reduction_factor,u=Math.floor(d*n),h=Math.floor(d*o),p=this.config.num_mel_bins;let _=[],m=null,f=null,g=0;for(;;){++g;const e=v(!!f);let o;o=f?f.output_sequence_out:new i.Tensor("float32",new Float32Array(p),[1,1,p]);let n={use_cache_branch:e,output_sequence:o,encoder_attention_mask:c,speaker_embeddings:t,encoder_hidden_states:l};this.addPastKeyValues(n,m),f=await 
x(this.decoder_merged_session,n),m=this.getPastKeyValues(f,m);const{prob:r,spectrum:a}=f;if(_.push(a),g>=h&&(Array.from(r.data).filter((e=>e>=s)).length>0||g>=u))break}const M=(0,i.cat)(_),{waveform:w}=await x(r.session,{spectrogram:M});return{spectrogram:M,waveform:w}}}class Yn extends N{main_input_name="spectrogram"}class Jn extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_encoder_layers=this.num_decoder_layers=this.config.decoder_layers,this.num_encoder_heads=this.num_decoder_heads=this.config.decoder_attention_heads,this.encoder_dim_kv=this.decoder_dim_kv=this.config.d_model/this.num_decoder_heads}}class Zn extends Jn{}class Kn extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_key_value_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.config.num_attention_heads}}class er extends Kn{}class tr extends Kn{}class sr extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_key_value_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.config.num_attention_heads}}class or extends sr{}class nr extends sr{}class rr extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.config.num_attention_heads}}class ar extends rr{}class ir extends rr{}class lr extends N{}class cr extends lr{}class dr extends lr{static async from_pretrained(e,t={}){return t.model_file_name??="text_model",super.from_pretrained(e,t)}}class ur extends lr{static async from_pretrained(e,t={}){return t.model_file_name??="audio_model",super.from_pretrained(e,t)}}class hr extends 
N{}class pr extends hr{async _call(e){return new Oa(await super._call(e))}}class _r extends N{}class mr extends _r{}class fr extends _r{}class gr extends _r{}class Mr extends N{constructor(e,t,s){super(e,t),this.generation_config=s,this.config.pad_token_id=this.config.eos_token_id,this.num_heads=this.config.num_attention_heads,this.num_layers=this.config.num_hidden_layers,this.dim_kv=this.config.hidden_size/this.num_heads}}class wr extends Mr{}class Tr extends Mr{}class kr extends N{}class br extends kr{}class xr extends kr{async _call(e){return new va(await super._call(e))}}class yr{static MODEL_CLASS_MAPPINGS=null;static BASE_IF_FAIL=!1;static async from_pretrained(e,{quantized:t=!0,progress_callback:s=null,config:n=null,cache_dir:r=null,local_files_only:a=!1,revision:i="main",model_file_name:l=null}={}){let c={quantized:t,progress_callback:s,config:n,cache_dir:r,local_files_only:a,revision:i,model_file_name:l};if(n=await o.AutoConfig.from_pretrained(e,c),c.config||(c.config=n),!this.MODEL_CLASS_MAPPINGS)throw new Error("`MODEL_CLASS_MAPPINGS` not implemented for this type of `AutoClass`: "+this.name);for(let t of this.MODEL_CLASS_MAPPINGS){const s=t.get(n.model_type);if(s)return await s[1].from_pretrained(e,c)}if(this.BASE_IF_FAIL)return console.warn(`Unknown model class "${n.model_type}", attempting to construct from base class.`),await N.from_pretrained(e,c);throw Error(`Unsupported model type: ${n.model_type}`)}}const Fr=new 
Map([["bert",["BertModel",R]],["nomic_bert",["NomicBertModel",Q]],["roformer",["RoFormerModel",Y]],["electra",["ElectraModel",le]],["esm",["EsmModel",Ne]],["convbert",["ConvBertModel",se]],["camembert",["CamembertModel",_e]],["deberta",["DebertaModel",Te]],["deberta-v2",["DebertaV2Model",Ce]],["mpnet",["MPNetModel",Qe]],["albert",["AlbertModel",rt]],["distilbert",["DistilBertModel",Ee]],["roberta",["RobertaModel",Bt]],["xlm",["XLMModel",qt]],["xlm-roberta",["XLMRobertaModel",Ut]],["clap",["ClapModel",cr]],["clip",["CLIPModel",rs]],["clipseg",["CLIPSegModel",ms]],["chinese_clip",["ChineseCLIPModel",ps]],["siglip",["SiglipModel",cs]],["mobilebert",["MobileBertModel",Ge]],["squeezebert",["SqueezeBertModel",et]],["wav2vec2",["Wav2Vec2Model",wn]],["wav2vec2-bert",["Wav2Vec2BertModel",zn]],["unispeech",["UniSpeechModel",yn]],["unispeech-sat",["UniSpeechSatModel",vn]],["hubert",["HubertModel",Dn]],["wavlm",["WavLMModel",jn]],["audio-spectrogram-transformer",["ASTModel",Zt]],["vits",["VitsModel",pr]],["detr",["DetrModel",Mo]],["table-transformer",["TableTransformerModel",yo]],["vit",["ViTModel",eo]],["mobilevit",["MobileViTModel",ro]],["owlvit",["OwlViTModel",lo]],["owlv2",["Owlv2Model",ho]],["beit",["BeitModel",mo]],["deit",["DeiTModel",vo]],["convnext",["ConvNextModel",Yo]],["convnextv2",["ConvNextV2Model",Ko]],["dinov2",["Dinov2Model",sn]],["resnet",["ResNetModel",Lo]],["swin",["SwinModel",Bo]],["swin2sr",["Swin2SRModel",Do]],["donut-swin",["DonutSwinModel",Qo]],["yolos",["YolosModel",rn]],["dpt",["DPTModel",qo]],["glpn",["GLPNModel",$o]],["hifigan",["SpeechT5HifiGan",Yn]],["efficientnet",["EfficientNetModel",br]]]),Cr=new Map([["t5",["T5Model",dt]],["longt5",["LongT5Model",pt]],["mt5",["MT5Model",ft]],["bart",["BartModel",wt]],["mbart",["MBartModel",xt]],["marian",["MarianModel",pn]],["whisper",["WhisperModel",ts]],["m2m_100",["M2M100Model",fn]],["blenderbot",["BlenderbotModel",vt]],["blenderbot-small",["BlenderbotSmallModel",Lt]]]),Pr=new 
Map([["bloom",["BloomModel",$s]],["gpt2",["GPT2Model",Ms]],["gptj",["GPTJModel",Ps]],["gpt_bigcode",["GPTBigCodeModel",As]],["gpt_neo",["GPTNeoModel",ks]],["gpt_neox",["GPTNeoXModel",ys]],["codegen",["CodeGenModel",zs]],["llama",["LlamaModel",Os]],["qwen2",["Qwen2Model",Vs]],["phi",["PhiModel",Rs]],["mpt",["MptModel",Qs]],["opt",["OPTModel",Js]],["mistral",["MistralModel",er]],["starcoder2",["Starcoder2Model",or]],["falcon",["FalconModel",ar]]]),vr=new Map([["speecht5",["SpeechT5ForSpeechToText",Qn]],["whisper",["WhisperForConditionalGeneration",ss]]]),Sr=new Map([["speecht5",["SpeechT5ForTextToSpeech",Hn]]]),Ar=new Map([["vits",["VitsModel",pr]]]),Lr=new Map([["bert",["BertForSequenceClassification",W]],["roformer",["RoFormerForSequenceClassification",Z]],["electra",["ElectraForSequenceClassification",de]],["esm",["EsmForSequenceClassification",qe]],["convbert",["ConvBertForSequenceClassification",ne]],["camembert",["CamembertForSequenceClassification",fe]],["deberta",["DebertaForSequenceClassification",be]],["deberta-v2",["DebertaV2ForSequenceClassification",ve]],["mpnet",["MPNetForSequenceClassification",Ye]],["albert",["AlbertForSequenceClassification",at]],["distilbert",["DistilBertForSequenceClassification",ze]],["roberta",["RobertaForSequenceClassification",Ot]],["xlm",["XLMForSequenceClassification",Rt]],["xlm-roberta",["XLMRobertaForSequenceClassification",Qt]],["bart",["BartForSequenceClassification",kt]],["mbart",["MBartForSequenceClassification",Ft]],["mobilebert",["MobileBertForSequenceClassification",$e]],["squeezebert",["SqueezeBertForSequenceClassification",st]]]),Er=new 
Map([["bert",["BertForTokenClassification",$]],["roformer",["RoFormerForTokenClassification",K]],["electra",["ElectraForTokenClassification",ue]],["esm",["EsmForTokenClassification",je]],["convbert",["ConvBertForTokenClassification",re]],["camembert",["CamembertForTokenClassification",ge]],["deberta",["DebertaForTokenClassification",xe]],["deberta-v2",["DebertaV2ForTokenClassification",Se]],["mpnet",["MPNetForTokenClassification",Je]],["distilbert",["DistilBertForTokenClassification",Be]],["roberta",["RobertaForTokenClassification",Dt]],["xlm",["XLMForTokenClassification",Gt]],["xlm-roberta",["XLMRobertaForTokenClassification",Ht]]]),zr=new Map([["t5",["T5ForConditionalGeneration",ut]],["longt5",["LongT5ForConditionalGeneration",_t]],["mt5",["MT5ForConditionalGeneration",gt]],["bart",["BartForConditionalGeneration",Tt]],["mbart",["MBartForConditionalGeneration",yt]],["marian",["MarianMTModel",_n]],["m2m_100",["M2M100ForConditionalGeneration",gn]],["blenderbot",["BlenderbotForConditionalGeneration",St]],["blenderbot-small",["BlenderbotSmallForConditionalGeneration",Et]]]),Br=new Map([["bloom",["BloomForCausalLM",Us]],["gpt2",["GPT2LMHeadModel",ws]],["gptj",["GPTJForCausalLM",vs]],["gpt_bigcode",["GPTBigCodeForCausalLM",Ls]],["gpt_neo",["GPTNeoForCausalLM",bs]],["gpt_neox",["GPTNeoXForCausalLM",Fs]],["codegen",["CodeGenForCausalLM",Bs]],["llama",["LlamaForCausalLM",Ds]],["qwen2",["Qwen2ForCausalLM",qs]],["phi",["PhiForCausalLM",Gs]],["mpt",["MptForCausalLM",Hs]],["opt",["OPTForCausalLM",Zs]],["mbart",["MBartForCausalLM",Ct]],["mistral",["MistralForCausalLM",tr]],["starcoder2",["Starcoder2ForCausalLM",nr]],["falcon",["FalconForCausalLM",ir]],["trocr",["TrOCRForCausalLM",Zn]],["stablelm",["StableLmForCausalLM",Tr]]]),Ir=new 
Map([["bert",["BertForMaskedLM",G]],["roformer",["RoFormerForMaskedLM",J]],["electra",["ElectraForMaskedLM",ce]],["esm",["EsmForMaskedLM",Ve]],["convbert",["ConvBertForMaskedLM",oe]],["camembert",["CamembertForMaskedLM",me]],["deberta",["DebertaForMaskedLM",ke]],["deberta-v2",["DebertaV2ForMaskedLM",Pe]],["mpnet",["MPNetForMaskedLM",He]],["albert",["AlbertForMaskedLM",lt]],["distilbert",["DistilBertForMaskedLM",Oe]],["roberta",["RobertaForMaskedLM",It]],["xlm",["XLMWithLMHeadModel",jt]],["xlm-roberta",["XLMRobertaForMaskedLM",Xt]],["mobilebert",["MobileBertForMaskedLM",We]],["squeezebert",["SqueezeBertForMaskedLM",tt]]]),Or=new Map([["bert",["BertForQuestionAnswering",U]],["roformer",["RoFormerForQuestionAnswering",ee]],["electra",["ElectraForQuestionAnswering",he]],["convbert",["ConvBertForQuestionAnswering",ae]],["camembert",["CamembertForQuestionAnswering",Me]],["deberta",["DebertaForQuestionAnswering",ye]],["deberta-v2",["DebertaV2ForQuestionAnswering",Ae]],["mpnet",["MPNetForQuestionAnswering",Ze]],["albert",["AlbertForQuestionAnswering",it]],["distilbert",["DistilBertForQuestionAnswering",Ie]],["roberta",["RobertaForQuestionAnswering",Nt]],["xlm",["XLMForQuestionAnswering",Wt]],["xlm-roberta",["XLMRobertaForQuestionAnswering",Yt]],["mobilebert",["MobileBertForQuestionAnswering",Ue]],["squeezebert",["SqueezeBertForQuestionAnswering",ot]]]),Dr=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",os]]]),Nr=new Map([["vision-encoder-decoder",["VisionEncoderDecoderModel",os]]]),Vr=new 
Map([["vit",["ViTForImageClassification",to]],["mobilevit",["MobileViTForImageClassification",ao]],["beit",["BeitForImageClassification",fo]],["deit",["DeiTForImageClassification",So]],["convnext",["ConvNextForImageClassification",Jo]],["convnextv2",["ConvNextV2ForImageClassification",en]],["dinov2",["Dinov2ForImageClassification",on]],["resnet",["ResNetForImageClassification",Eo]],["swin",["SwinForImageClassification",Io]],["segformer",["SegformerForImageClassification",fr]],["efficientnet",["EfficientNetForImageClassification",xr]]]),qr=new Map([["detr",["DetrForObjectDetection",wo]],["table-transformer",["TableTransformerForObjectDetection",Fo]],["yolos",["YolosForObjectDetection",an]]]),jr=new Map([["owlvit",["OwlViTForObjectDetection",co]],["owlv2",["Owlv2ForObjectDetection",po]]]),Rr=new Map([["detr",["DetrForSegmentation",To]],["clipseg",["CLIPSegForImageSegmentation",fs]]]),Gr=new Map([["segformer",["SegformerForSemanticSegmentation",gr]]]),Wr=new Map([["sam",["SamModel",dn]]]),$r=new Map([["wav2vec2",["Wav2Vec2ForCTC",Tn]],["wav2vec2-bert",["Wav2Vec2BertForCTC",Bn]],["unispeech",["UniSpeechForCTC",Fn]],["unispeech-sat",["UniSpeechSatForCTC",Sn]],["wavlm",["WavLMForCTC",Rn]],["hubert",["HubertForCTC",Nn]]]),Ur=new Map([["wav2vec2",["Wav2Vec2ForSequenceClassification",kn]],["wav2vec2-bert",["Wav2Vec2BertForSequenceClassification",In]],["unispeech",["UniSpeechForSequenceClassification",Cn]],["unispeech-sat",["UniSpeechSatForSequenceClassification",An]],["wavlm",["WavLMForSequenceClassification",Gn]],["hubert",["HubertForSequenceClassification",Vn]],["audio-spectrogram-transformer",["ASTForAudioClassification",Kt]]]),Xr=new Map([["wavlm",["WavLMForXVector",Wn]]]),Qr=new Map([["unispeech-sat",["UniSpeechSatForAudioFrameClassification",Ln]],["wavlm",["WavLMForAudioFrameClassification",$n]],["wav2vec2",["Wav2Vec2ForAudioFrameClassification",bn]]]),Hr=new Map([["vitmatte",["VitMatteForImageMatting",oo]]]),Yr=new 
Map([["swin2sr",["Swin2SRForImageSuperResolution",No]]]),Jr=new Map([["dpt",["DPTForDepthEstimation",jo]],["depth_anything",["DepthAnythingForDepthEstimation",Go]],["glpn",["GLPNForDepthEstimation",Uo]]]),Zr=new Map([["clip",["CLIPVisionModelWithProjection",is]],["siglip",["SiglipVisionModel",us]]]),Kr=[[Fr,p],[Cr,_],[Pr,g],[Lr,p],[Er,p],[zr,m],[vr,m],[Br,g],[Ir,p],[Or,p],[Dr,f],[Vr,p],[Rr,p],[Gr,p],[Hr,p],[Yr,p],[Jr,p],[qr,p],[jr,p],[Wr,M],[$r,p],[Ur,p],[Sr,m],[Ar,p],[Xr,p],[Qr,p],[Zr,p]];for(const[e,t]of Kr)for(const[s,o]of e.values())w.set(s,t),k.set(o,s),T.set(s,o);const ea=[["CLIPTextModelWithProjection",as,p],["SiglipTextModel",ds,p],["ClapTextModelWithProjection",dr,p],["ClapAudioModelWithProjection",ur,p]];for(const[e,t,s]of ea)w.set(e,s),k.set(t,e),T.set(e,t);class ta extends yr{static MODEL_CLASS_MAPPINGS=Kr.map((e=>e[0]));static BASE_IF_FAIL=!0}class sa extends yr{static MODEL_CLASS_MAPPINGS=[Lr]}class oa extends yr{static MODEL_CLASS_MAPPINGS=[Er]}class na extends yr{static MODEL_CLASS_MAPPINGS=[zr]}class ra extends yr{static MODEL_CLASS_MAPPINGS=[vr]}class aa extends yr{static MODEL_CLASS_MAPPINGS=[Sr]}class ia extends yr{static MODEL_CLASS_MAPPINGS=[Ar]}class la extends yr{static MODEL_CLASS_MAPPINGS=[Br]}class ca extends yr{static MODEL_CLASS_MAPPINGS=[Ir]}class da extends yr{static MODEL_CLASS_MAPPINGS=[Or]}class ua extends yr{static MODEL_CLASS_MAPPINGS=[Dr]}class ha extends yr{static MODEL_CLASS_MAPPINGS=[Vr]}class pa extends yr{static MODEL_CLASS_MAPPINGS=[Rr]}class _a extends yr{static MODEL_CLASS_MAPPINGS=[Gr]}class ma extends yr{static MODEL_CLASS_MAPPINGS=[qr]}class fa extends yr{static MODEL_CLASS_MAPPINGS=[jr]}class ga extends yr{static MODEL_CLASS_MAPPINGS=[Wr]}class Ma extends yr{static MODEL_CLASS_MAPPINGS=[$r]}class wa extends yr{static MODEL_CLASS_MAPPINGS=[Ur]}class Ta extends yr{static MODEL_CLASS_MAPPINGS=[Xr]}class ka extends yr{static MODEL_CLASS_MAPPINGS=[Qr]}class ba extends yr{static MODEL_CLASS_MAPPINGS=[Nr]}class xa extends 
yr{static MODEL_CLASS_MAPPINGS=[Hr]}class ya extends yr{static MODEL_CLASS_MAPPINGS=[Yr]}class Fa extends yr{static MODEL_CLASS_MAPPINGS=[Jr]}class Ca extends yr{static MODEL_CLASS_MAPPINGS=[Zr]}class Pa extends V{constructor({logits:e,past_key_values:t,encoder_outputs:s,decoder_attentions:o=null,cross_attentions:n=null}){super(),this.logits=e,this.past_key_values=t,this.encoder_outputs=s,this.decoder_attentions=o,this.cross_attentions=n}}class va extends V{constructor({logits:e}){super(),this.logits=e}}class Sa extends V{constructor({logits:e,embeddings:t}){super(),this.logits=e,this.embeddings=t}}class Aa extends V{constructor({logits:e}){super(),this.logits=e}}class La extends V{constructor({logits:e}){super(),this.logits=e}}class Ea extends V{constructor({start_logits:e,end_logits:t}){super(),this.start_logits=e,this.end_logits=t}}class za extends V{constructor({logits:e}){super(),this.logits=e}}class Ba extends V{constructor({logits:e,past_key_values:t}){super(),this.logits=e,this.past_key_values=t}}class Ia extends V{constructor({alphas:e}){super(),this.alphas=e}}class Oa extends V{constructor({waveform:e,spectrogram:t}){super(),this.waveform=e,this.spectrogram=t}}},"./src/pipelines.js":
/*!**************************!*\
!*** ./src/pipelines.js ***!
\**************************/(e,t,s)=>{s.r(t),s.d(t,{AudioClassificationPipeline:()=>C,AutomaticSpeechRecognitionPipeline:()=>v,DepthEstimationPipeline:()=>N,DocumentQuestionAnsweringPipeline:()=>I,FeatureExtractionPipeline:()=>y,FillMaskPipeline:()=>M,ImageClassificationPipeline:()=>A,ImageFeatureExtractionPipeline:()=>F,ImageSegmentationPipeline:()=>L,ImageToImagePipeline:()=>D,ImageToTextPipeline:()=>S,ObjectDetectionPipeline:()=>z,Pipeline:()=>_,QuestionAnsweringPipeline:()=>g,SummarizationPipeline:()=>T,Text2TextGenerationPipeline:()=>w,TextClassificationPipeline:()=>m,TextGenerationPipeline:()=>b,TextToAudioPipeline:()=>O,TokenClassificationPipeline:()=>f,TranslationPipeline:()=>k,ZeroShotAudioClassificationPipeline:()=>P,ZeroShotClassificationPipeline:()=>x,ZeroShotImageClassificationPipeline:()=>E,ZeroShotObjectDetectionPipeline:()=>B,pipeline:()=>j});var o=s(/*! ./tokenizers.js */"./src/tokenizers.js"),n=s(/*! ./models.js */"./src/models.js"),r=s(/*! ./processors.js */"./src/processors.js"),a=s(/*! ./utils/core.js */"./src/utils/core.js"),i=s(/*! ./utils/maths.js */"./src/utils/maths.js"),l=s(/*! ./utils/audio.js */"./src/utils/audio.js"),c=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),d=s(/*! 
./utils/image.js */"./src/utils/image.js");async function u(e){return Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>d.RawImage.read(e))))}async function h(e,t){return Array.isArray(e)||(e=[e]),await Promise.all(e.map((e=>"string"==typeof e||e instanceof URL?(0,l.read_audio)(e,t):e instanceof Float64Array?new Float32Array(e):e)))}function p(e,t){t&&(e=e.map((e=>0|e)));const[s,o,n,r]=e;return{xmin:s,ymin:o,xmax:n,ymax:r}}class _ extends a.Callable{constructor({task:e,model:t,tokenizer:s=null,processor:o=null}){super(),this.task=e,this.model=t,this.tokenizer=s,this.processor=o}async dispose(){await this.model.dispose()}}class m extends _{constructor(e){super(e)}async _call(e,{topk:t=1}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),o=await this.model(s),n="multi_label_classification"===this.model.config.problem_type?e=>e.sigmoid().data:e=>(0,i.softmax)(e.data),r=this.model.config.id2label,a=[];for(const e of o.logits){const s=n(e),o=(0,i.getTopItems)(s,t).map((e=>({label:r[e[0]],score:e[1]})));1===t?a.push(...o):a.push(o)}return Array.isArray(e)||1===t?a:a[0]}}class f extends _{constructor(e){super(e)}async _call(e,{ignore_labels:t=["O"]}={}){const s=Array.isArray(e),o=this.tokenizer(s?e:[e],{padding:!0,truncation:!0}),n=(await this.model(o)).logits,r=this.model.config.id2label,a=[];for(let e=0;e<n.dims[0];++e){const s=o.input_ids[e],l=n[e],c=[];for(let e=0;e<l.dims[0];++e){const o=l[e],n=(0,i.max)(o.data)[1],a=r?r[n]:`LABEL_${n}`;if(t.includes(a))continue;const d=this.tokenizer.decode([s[e].item()],{skip_special_tokens:!0});if(""===d)continue;const u=(0,i.softmax)(o.data);c.push({entity:a,score:u[n],index:e,word:d,start:null,end:null})}a.push(c)}return s?a:a[0]}}class g extends _{constructor(e){super(e)}async _call(e,t,{topk:s=1}={}){const o=this.tokenizer(e,{text_pair:t,padding:!0,truncation:!0}),n=await this.model(o),r=[];for(let e=0;e<n.start_logits.dims[0];++e){const 
t=o.input_ids[e],l=t.indexOf(this.tokenizer.sep_token_id),c=Array.from((0,i.softmax)(n.start_logits[e].data)).map(((e,t)=>[e,t])).filter((e=>e[1]>l)),d=Array.from((0,i.softmax)(n.end_logits[e].data)).map(((e,t)=>[e,t])).filter((e=>e[1]>l)),u=(0,a.product)(c,d).filter((e=>e[0][1]<=e[1][1])).map((e=>[e[0][1],e[1][1],e[0][0]*e[1][0]])).sort(((e,t)=>t[2]-e[2]));for(let e=0;e<Math.min(u.length,s);++e){const[s,o,n]=u[e],a=[...t].slice(s,o+1),i=this.tokenizer.decode(a,{skip_special_tokens:!0});r.push({answer:i,score:n})}}return 1===s?r[0]:r}}class M extends _{constructor(e){super(e)}async _call(e,{topk:t=5}={}){const s=this.tokenizer(e,{padding:!0,truncation:!0}),o=await this.model(s),n=[];for(let e=0;e<s.input_ids.dims[0];++e){const r=s.input_ids[e],a=r.indexOf(this.tokenizer.mask_token_id);if(-1===a)throw Error(`Mask token (${this.tokenizer.mask_token}) not found in text.`);const l=o.logits[e][a],c=(0,i.getTopItems)((0,i.softmax)(l.data),t);n.push(c.map((e=>{const t=[...r];return t[a]=e[0],{score:e[1],token:e[0],token_str:this.tokenizer.model.vocab[e[0]],sequence:this.tokenizer.decode(t,{skip_special_tokens:!0})}})))}return Array.isArray(e)?n:n[0]}}class w extends _{_key="generated_text";constructor(e){super(e)}async _call(e,t={}){Array.isArray(e)||(e=[e]),this.model.config.prefix&&(e=e.map((e=>this.model.config.prefix+e)));const s=this.model.config.task_specific_params;s&&s[this.task]&&s[this.task].prefix&&(e=e.map((e=>s[this.task].prefix+e)));const o=this.tokenizer,n={padding:!0,truncation:!0};let r;r=this instanceof k&&"_build_translation_inputs"in o?o._build_translation_inputs(e,n,t).input_ids:o(e,n).input_ids;const a=await this.model.generate(r,t);return o.batch_decode(a,{skip_special_tokens:!0}).map((e=>({[this._key]:e})))}}class T extends w{_key="summary_text";constructor(e){super(e)}}class k extends w{_key="translation_text";constructor(e){super(e)}}class b extends _{constructor(e){super(e)}async _call(e,t={}){const s=Array.isArray(e);s||(e=[e]);const 
o=t.add_special_tokens??!1;this.tokenizer.padding_side="left";const{input_ids:n,attention_mask:r}=this.tokenizer(e,{add_special_tokens:o,padding:!0,truncation:!0}),a=await this.model.generate(n,t,null,{inputs_attention_mask:r}),i=this.tokenizer.batch_decode(a,{skip_special_tokens:!0}),l=Array.from({length:e.length},(e=>[]));for(let t=0;t<i.length;++t){l[Math.floor(t/a.length*e.length)].push({generated_text:i[t]})}return s||1!==l.length?l:l[0]}}class x extends _{constructor(e){super(e),this.label2id=Object.fromEntries(Object.entries(this.model.config.label2id).map((([e,t])=>[e.toLowerCase(),t]))),this.entailment_id=this.label2id.entailment,void 0===this.entailment_id&&(console.warn("Could not find 'entailment' in label2id mapping. Using 2 as entailment_id."),this.entailment_id=2),this.contradiction_id=this.label2id.contradiction??this.label2id.not_entailment,void 0===this.contradiction_id&&(console.warn("Could not find 'contradiction' in label2id mapping. Using 0 as contradiction_id."),this.contradiction_id=0)}async _call(e,t,{hypothesis_template:s="This example is {}.",multi_label:o=!1}={}){const n=Array.isArray(e);n||(e=[e]),Array.isArray(t)||(t=[t]);const r=t.map((e=>s.replace("{}",e))),a=o||1===t.length,l=[];for(const s of e){const e=[];for(const t of r){const o=this.tokenizer(s,{text_pair:t,padding:!0,truncation:!0}),n=await this.model(o);a?e.push([n.logits.data[this.contradiction_id],n.logits.data[this.entailment_id]]):e.push(n.logits.data[this.entailment_id])}const o=(a?e.map((e=>(0,i.softmax)(e)[1])):(0,i.softmax)(e)).map(((e,t)=>[e,t])).sort(((e,t)=>t[0]-e[0]));l.push({sequence:s,labels:o.map((e=>t[e[1]])),scores:o.map((e=>e[0]))})}return n?l:l[0]}}class y extends _{constructor(e){super(e)}async _call(e,{pooling:t="none",normalize:s=!1}={}){const o=this.tokenizer(e,{padding:!0,truncation:!0}),n=await this.model(o);let r=n.last_hidden_state??n.logits;if("none"===t);else if("mean"===t)r=(0,c.mean_pooling)(r,o.attention_mask);else{if("cls"!==t)throw 
Error(`Pooling method '${t}' not supported.`);r=r.slice(null,0)}return s&&(r=r.normalize(2,-1)),r}}class F extends _{constructor(e){super(e)}async _call(e,{pool:t=null}={}){const s=await u(e),{pixel_values:o}=await this.processor(s),n=await this.model({pixel_values:o});let r;if(t){if(!("pooler_output"in n))throw Error("No pooled output was returned. Make sure the model has a 'pooler' layer when using the 'pool' option.");r=n.pooler_output}else r=n.last_hidden_state??n.logits??n.image_embeds;return r}}class C extends _{constructor(e){super(e)}async _call(e,{topk:t=null}={}){const s=!Array.isArray(e),o=this.processor.feature_extractor.config.sampling_rate,n=await h(e,o),r=this.model.config.id2label,a=[];for(const e of n){const s=await this.processor(e),o=(await this.model(s)).logits[0],n=(0,i.getTopItems)((0,i.softmax)(o.data),t).map((e=>({label:r[e[0]],score:e[1]})));1===t?a.push(...n):a.push(n)}return s&&1!==t?a[0]:a}}class P extends _{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a sound of {}."}={}){const o=!Array.isArray(e);o&&(e=[e]);const n=t.map((e=>s.replace("{}",e))),r=this.tokenizer(n,{padding:!0,truncation:!0}),a=this.processor.feature_extractor.config.sampling_rate,l=await h(e,a),c=[];for(const e of l){const s=await this.processor(e),o=await this.model({...r,...s}),n=(0,i.softmax)(o.logits_per_audio.data);c.push([...n].map(((e,s)=>({score:e,label:t[s]}))))}return o?c[0]:c}}class v extends _{constructor(e){super(e)}async _call(e,t={}){switch(this.model.config.model_type){case"whisper":return this._call_whisper(e,t);case"wav2vec2":case"wav2vec2-bert":case"unispeech":case"unispeech-sat":case"hubert":return this._call_wav2vec2(e,t);default:throw new Error(`AutomaticSpeechRecognitionPipeline does not support model type '${this.model.config.model_type}'.`)}}async _call_wav2vec2(e,t={}){t.language&&console.warn('`language` parameter is not yet supported for `wav2vec2` models, defaulting to "English".'),t.task&&console.warn('`task` 
parameter is not yet supported for `wav2vec2` models, defaulting to "transcribe".');const s=!Array.isArray(e);s&&(e=[e]);const o=this.processor.feature_extractor.config.sampling_rate,n=await h(e,o),r=[];for(const e of n){const t=await this.processor(e),s=(await this.model(t)).logits[0],o=[];for(const e of s)o.push((0,i.max)(e.data)[1]);const n=this.tokenizer.decode(o);r.push({text:n})}return s?r[0]:r}async _call_whisper(e,t={}){const s=t.return_timestamps??!1,o=t.chunk_length_s??0,n=t.chunk_callback??null,r=t.force_full_sequences??!1;let l=t.stride_length_s??null;"word"===s&&(t.return_token_timestamps=!0);const c=(0,a.pop)(t,"language",null),d=(0,a.pop)(t,"task",null);if(c||d||s){if(t.forced_decoder_ids)throw new Error("Cannot specify `language`/`task`/`return_timestamps` and `forced_decoder_ids` at the same time.");const e=this.tokenizer.get_decoder_prompt_ids({language:c,task:d,no_timestamps:!s});e.length>0&&(t.forced_decoder_ids=e)}const u=!Array.isArray(e);u&&(e=[e]);const p=this.processor.feature_extractor.config.chunk_length/this.model.config.max_source_positions,_=this.processor.feature_extractor.config.hop_length,m=this.processor.feature_extractor.config.sampling_rate,f=await h(e,m),g=[];for(const e of f){let a=[];if(o>0){if(null===l)l=o/6;else if(o<=l)throw Error("`chunk_length_s` must be larger than `stride_length_s`.");const t=m*o,s=m*l,n=t-2*s;let r=0;for(;r<e.length;){const o=e.subarray(r,r+t),i=await this.processor(o),l=0===r,c=r+n>=e.length;a.push({stride:[o.length,l?0:s,c?0:s],input_features:i.input_features,is_last:c}),r+=n}}else a=[{stride:[e.length,0,0],input_features:(await this.processor(e)).input_features,is_last:!0}];for(const e of a){t.num_frames=Math.floor(e.stride[0]/_);const o=await 
this.model.generate(e.input_features,t);"word"===s?(e.tokens=o.sequences[0],e.token_timestamps=o.token_timestamps.tolist()[0].map((e=>(0,i.round)(e,2)))):e.tokens=o[0],e.stride=e.stride.map((e=>e/m)),null!==n&&n(e)}const[c,d]=this.tokenizer._decode_asr(a,{time_precision:p,return_timestamps:s,force_full_sequences:r});g.push({text:c,...d})}return u?g[0]:g}}class S extends _{constructor(e){super(e)}async _call(e,t={}){const s=Array.isArray(e),o=await u(e),{pixel_values:n}=await this.processor(o),r=[];for(const e of n){e.dims=[1,...e.dims];const s=await this.model.generate(e,t),o=this.tokenizer.batch_decode(s,{skip_special_tokens:!0}).map((e=>({generated_text:e.trim()})));r.push(o)}return s?r:r[0]}}class A extends _{constructor(e){super(e)}async _call(e,{topk:t=1}={}){const s=Array.isArray(e),o=await u(e),{pixel_values:n}=await this.processor(o),r=await this.model({pixel_values:n}),a=this.model.config.id2label,l=[];for(const e of r.logits){const s=(0,i.getTopItems)((0,i.softmax)(e.data),t).map((e=>({label:a[e[0]],score:e[1]})));1===t?l.push(...s):l.push(s)}return s||1===t?l:l[0]}}class L extends _{constructor(e){super(e),this.subtasks_mapping={panoptic:"post_process_panoptic_segmentation",instance:"post_process_instance_segmentation",semantic:"post_process_semantic_segmentation"}}async _call(e,{threshold:t=.5,mask_threshold:s=.5,overlap_mask_area_threshold:o=.8,label_ids_to_fuse:n=null,target_sizes:r=null,subtask:a=null}={}){if(Array.isArray(e)&&1!==e.length)throw Error("Image segmentation pipeline currently only supports a batch size of 1.");const i=await u(e),l=i.map((e=>[e.height,e.width])),{pixel_values:c,pixel_mask:h}=await this.processor(i),p=await this.model({pixel_values:c,pixel_mask:h});let _=null;if(null!==a)_=this.subtasks_mapping[a];else for(let[e,t]of Object.entries(this.subtasks_mapping))if(t in this.processor.feature_extractor){_=this.processor.feature_extractor[t].bind(this.processor.feature_extractor),a=e;break}const 
m=this.model.config.id2label,f=[];if("panoptic"===a||"instance"===a){const e=_(p,t,s,o,n,r??l)[0],a=e.segmentation;for(const t of e.segments_info){const e=new Uint8ClampedArray(a.data.length);for(let s=0;s<a.data.length;++s)a.data[s]===t.id&&(e[s]=255);const s=new d.RawImage(e,a.dims[1],a.dims[0],1);f.push({score:t.score,label:m[t.label_id],mask:s})}}else{if("semantic"!==a)throw Error(`Subtask ${a} not supported.`);{const{segmentation:e,labels:t}=_(p,r??l)[0];for(const s of t){const t=new Uint8ClampedArray(e.data.length);for(let o=0;o<e.data.length;++o)e.data[o]===s&&(t[o]=255);const o=new d.RawImage(t,e.dims[1],e.dims[0],1);f.push({score:null,label:m[s],mask:o})}}}return f}}class E extends _{constructor(e){super(e)}async _call(e,t,{hypothesis_template:s="This is a photo of {}"}={}){const o=Array.isArray(e),n=await u(e),r=t.map((e=>s.replace("{}",e))),a=this.tokenizer(r,{padding:"siglip"!==this.model.config.model_type||"max_length",truncation:!0}),{pixel_values:l}=await this.processor(n),c=await this.model({...a,pixel_values:l}),d="siglip"===this.model.config.model_type?e=>e.sigmoid().data:e=>(0,i.softmax)(e.data),h=[];for(const e of c.logits_per_image){const s=[...d(e)].map(((e,s)=>({score:e,label:t[s]})));s.sort(((e,t)=>t.score-e.score)),h.push(s)}return o?h:h[0]}}class z extends _{constructor(e){super(e)}async _call(e,{threshold:t=.9,percentage:s=!1}={}){const o=Array.isArray(e);if(o&&1!==e.length)throw Error("Object detection pipeline currently only supports a batch size of 1.");const n=await u(e),r=s?null:n.map((e=>[e.height,e.width])),{pixel_values:a,pixel_mask:i}=await this.processor(n),l=await this.model({pixel_values:a,pixel_mask:i}),c=this.processor.feature_extractor.post_process_object_detection(l,t,r),d=this.model.config.id2label,h=c.map((e=>e.boxes.map(((t,o)=>({score:e.scores[o],label:d[e.classes[o]],box:p(t,!s)})))));return o?h:h[0]}}class B extends _{constructor(e){super(e)}async _call(e,t,{threshold:s=.1,topk:o=null,percentage:n=!1}={}){const 
r=Array.isArray(e),a=await u(e),i=this.tokenizer(t,{padding:!0,truncation:!0}),l=await this.processor(a),c=[];for(let e=0;e<a.length;++e){const r=a[e],d=n?null:[[r.height,r.width]],u=l.pixel_values[e].unsqueeze_(0),h=await this.model({...i,pixel_values:u}),_=this.processor.feature_extractor.post_process_object_detection(h,s,d,!0)[0];let m=_.boxes.map(((e,s)=>({score:_.scores[s],label:t[_.classes[s]],box:p(e,!n)}))).sort(((e,t)=>t.score-e.score));null!==o&&(m=m.slice(0,o)),c.push(m)}return r?c:c[0]}}class I extends _{constructor(e){super(e)}async _call(e,t,s={}){const o=(await u(e))[0],{pixel_values:n}=await this.processor(o),r=`<s_docvqa><s_question>${t}</s_question><s_answer>`,a=this.tokenizer(r,{add_special_tokens:!1,padding:!0,truncation:!0}).input_ids,i=await this.model.generate(n,{...s,decoder_input_ids:a,max_length:this.model.config.decoder.max_position_embeddings}),l=this.tokenizer.batch_decode(i)[0].match(/<s_answer>(.*?)<\/s_answer>/);let c=null;return l&&l.length>=2&&(c=l[1].trim()),[{answer:c}]}}class O extends _{DEFAULT_VOCODER_ID="Xenova/speecht5_hifigan";constructor(e){super(e),this.vocoder=e.vocoder??null}async _call(e,{speaker_embeddings:t=null}={}){return this.processor?this._call_text_to_spectrogram(e,{speaker_embeddings:t}):this._call_text_to_waveform(e)}async _call_text_to_waveform(e){const t=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:s}=await this.model(t),o=this.model.config.sampling_rate;return{audio:s.data,sampling_rate:o}}async _call_text_to_spectrogram(e,{speaker_embeddings:t}){if(this.vocoder||(console.log("No vocoder specified, using default HifiGan vocoder."),this.vocoder=await n.AutoModel.from_pretrained(this.DEFAULT_VOCODER_ID,{quantized:!1})),("string"==typeof t||t instanceof URL)&&(t=new Float32Array(await(await fetch(t)).arrayBuffer())),t instanceof Float32Array)t=new c.Tensor("float32",t,[1,t.length]);else if(!(t instanceof c.Tensor))throw new Error("Speaker embeddings must be a `Tensor`, `Float32Array`, `string`, or 
`URL`.");const{input_ids:s}=this.tokenizer(e,{padding:!0,truncation:!0}),{waveform:o}=await this.model.generate_speech(s,t,{vocoder:this.vocoder}),r=this.processor.feature_extractor.config.sampling_rate;return{audio:o.data,sampling_rate:r}}}class D extends _{constructor(e){super(e)}async _call(e){const t=await u(e),s=await this.processor(t),o=await this.model(s),n=[];for(const e of o.reconstruction){const t=e.squeeze().clamp_(0,1).mul_(255).round_().to("uint8");n.push(d.RawImage.fromTensor(t))}return n.length>1?n:n[0]}}class N extends _{constructor(e){super(e)}async _call(e){const t=await u(e),s=await this.processor(t),{predicted_depth:o}=await this.model(s),n=[];for(let e=0;e<t.length;++e){const s=(0,c.interpolate)(o[e],t[e].size.reverse(),"bilinear",!1),r=s.mul_(255/(0,i.max)(s.data)[0]).to("uint8");n.push({predicted_depth:o[e],depth:d.RawImage.fromTensor(r)})}return n.length>1?n:n[0]}}const V=Object.freeze({"text-classification":{tokenizer:o.AutoTokenizer,pipeline:m,model:n.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-finetuned-sst-2-english"},type:"text"},"token-classification":{tokenizer:o.AutoTokenizer,pipeline:f,model:n.AutoModelForTokenClassification,default:{model:"Xenova/bert-base-multilingual-cased-ner-hrl"},type:"text"},"question-answering":{tokenizer:o.AutoTokenizer,pipeline:g,model:n.AutoModelForQuestionAnswering,default:{model:"Xenova/distilbert-base-cased-distilled-squad"},type:"text"},"fill-mask":{tokenizer:o.AutoTokenizer,pipeline:M,model:n.AutoModelForMaskedLM,default:{model:"Xenova/bert-base-uncased"},type:"text"},summarization:{tokenizer:o.AutoTokenizer,pipeline:T,model:n.AutoModelForSeq2SeqLM,default:{model:"Xenova/distilbart-cnn-6-6"},type:"text"},translation:{tokenizer:o.AutoTokenizer,pipeline:k,model:n.AutoModelForSeq2SeqLM,default:{model:"Xenova/t5-small"},type:"text"},"text2text-generation":{tokenizer:o.AutoTokenizer,pipeline:w,model:n.AutoModelForSeq2SeqLM,default:{model:"Xenova/flan-t5-small"},type:"t
ext"},"text-generation":{tokenizer:o.AutoTokenizer,pipeline:b,model:n.AutoModelForCausalLM,default:{model:"Xenova/gpt2"},type:"text"},"zero-shot-classification":{tokenizer:o.AutoTokenizer,pipeline:x,model:n.AutoModelForSequenceClassification,default:{model:"Xenova/distilbert-base-uncased-mnli"},type:"text"},"audio-classification":{pipeline:C,model:n.AutoModelForAudioClassification,processor:r.AutoProcessor,default:{model:"Xenova/wav2vec2-base-superb-ks"},type:"audio"},"zero-shot-audio-classification":{tokenizer:o.AutoTokenizer,pipeline:P,model:n.AutoModel,processor:r.AutoProcessor,default:{model:"Xenova/clap-htsat-unfused"},type:"multimodal"},"automatic-speech-recognition":{tokenizer:o.AutoTokenizer,pipeline:v,model:[n.AutoModelForSpeechSeq2Seq,n.AutoModelForCTC],processor:r.AutoProcessor,default:{model:"Xenova/whisper-tiny.en"},type:"multimodal"},"text-to-audio":{tokenizer:o.AutoTokenizer,pipeline:O,model:[n.AutoModelForTextToWaveform,n.AutoModelForTextToSpectrogram],processor:[r.AutoProcessor,null],default:{model:"Xenova/speecht5_tts"},type:"text"},"image-to-text":{tokenizer:o.AutoTokenizer,pipeline:S,model:n.AutoModelForVision2Seq,processor:r.AutoProcessor,default:{model:"Xenova/vit-gpt2-image-captioning"},type:"multimodal"},"image-classification":{pipeline:A,model:n.AutoModelForImageClassification,processor:r.AutoProcessor,default:{model:"Xenova/vit-base-patch16-224"},type:"multimodal"},"image-segmentation":{pipeline:L,model:[n.AutoModelForImageSegmentation,n.AutoModelForSemanticSegmentation],processor:r.AutoProcessor,default:{model:"Xenova/detr-resnet-50-panoptic"},type:"multimodal"},"zero-shot-image-classification":{tokenizer:o.AutoTokenizer,pipeline:E,model:n.AutoModel,processor:r.AutoProcessor,default:{model:"Xenova/clip-vit-base-patch32"},type:"multimodal"},"object-detection":{pipeline:z,model:n.AutoModelForObjectDetection,processor:r.AutoProcessor,default:{model:"Xenova/detr-resnet-50"},type:"multimodal"},"zero-shot-object-detection":{tokenizer:o.AutoToken
izer,pipeline:B,model:n.AutoModelForZeroShotObjectDetection,processor:r.AutoProcessor,default:{model:"Xenova/owlvit-base-patch32"},type:"multimodal"},"document-question-answering":{tokenizer:o.AutoTokenizer,pipeline:I,model:n.AutoModelForDocumentQuestionAnswering,processor:r.AutoProcessor,default:{model:"Xenova/donut-base-finetuned-docvqa"},type:"multimodal"},"image-to-image":{pipeline:D,model:n.AutoModelForImageToImage,processor:r.AutoProcessor,default:{model:"Xenova/swin2SR-classical-sr-x2-64"},type:"image"},"depth-estimation":{pipeline:N,model:n.AutoModelForDepthEstimation,processor:r.AutoProcessor,default:{model:"Xenova/dpt-large"},type:"image"},"feature-extraction":{tokenizer:o.AutoTokenizer,pipeline:y,model:n.AutoModel,default:{model:"Xenova/all-MiniLM-L6-v2"},type:"text"},"image-feature-extraction":{processor:r.AutoProcessor,pipeline:F,model:[n.AutoModelForImageFeatureExtraction,n.AutoModel],default:{model:"Xenova/vit-base-patch16-224-in21k"},type:"image"}}),q=Object.freeze({"sentiment-analysis":"text-classification",ner:"token-classification",asr:"automatic-speech-recognition","text-to-speech":"text-to-audio",embeddings:"feature-extraction"});async function j(e,t=null,{quantized:s=!0,progress_callback:o=null,config:n=null,cache_dir:r=null,local_files_only:i=!1,revision:l="main"}={}){e=q[e]??e;const c=V[e.split("_",1)[0]];if(!c)throw Error(`Unsupported pipeline: ${e}. Must be one of [${Object.keys(V)}]`);t||(t=c.default.model,console.log(`No model specified. 
Using default model: "${t}".`));const d={quantized:s,progress_callback:o,config:n,cache_dir:r,local_files_only:i,revision:l},u=new Map([["tokenizer",c.tokenizer],["model",c.model],["processor",c.processor]]),h=await async function(e,t,s){const o=Object.create(null),n=[];for(let[r,a]of e.entries()){if(!a)continue;let e;e=Array.isArray(a)?new Promise((async(e,o)=>{let n;for(let o of a){if(null===o)return void e(null);try{return void e(await o.from_pretrained(t,s))}catch(e){n=e}}o(n)})):a.from_pretrained(t,s),o[r]=e,n.push(e)}await Promise.all(n);for(let[e,t]of Object.entries(o))o[e]=await t;return o}(u,t,d);h.task=e,(0,a.dispatchCallback)(o,{status:"ready",task:e,model:t});return new(0,c.pipeline)(h)}},"./src/processors.js":
/*!***************************!*\
!*** ./src/processors.js ***!
\***************************/(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>G,AutoProcessor:()=>Z,BeitFeatureExtractor:()=>E,BitImageProcessor:()=>M,CLIPFeatureExtractor:()=>T,ChineseCLIPFeatureExtractor:()=>k,ClapFeatureExtractor:()=>W,ConvNextFeatureExtractor:()=>x,ConvNextImageProcessor:()=>y,DPTFeatureExtractor:()=>f,DPTImageProcessor:()=>g,DeiTFeatureExtractor:()=>L,DetrFeatureExtractor:()=>I,DonutFeatureExtractor:()=>z,EfficientNetImageProcessor:()=>P,FeatureExtractor:()=>p,GLPNFeatureExtractor:()=>w,ImageFeatureExtractor:()=>_,MobileViTFeatureExtractor:()=>v,NougatImageProcessor:()=>B,OwlViTFeatureExtractor:()=>S,OwlViTProcessor:()=>J,Owlv2ImageProcessor:()=>A,Processor:()=>U,SamImageProcessor:()=>D,SamProcessor:()=>X,SeamlessM4TFeatureExtractor:()=>R,SegformerFeatureExtractor:()=>m,SiglipImageProcessor:()=>b,SpeechT5FeatureExtractor:()=>$,SpeechT5Processor:()=>Y,Swin2SRImageProcessor:()=>N,ViTFeatureExtractor:()=>F,ViTImageProcessor:()=>C,VitMatteImageProcessor:()=>V,Wav2Vec2FeatureExtractor:()=>j,Wav2Vec2ProcessorWithLM:()=>H,WhisperFeatureExtractor:()=>q,WhisperProcessor:()=>Q,YolosFeatureExtractor:()=>O});var o=s(/*! ./utils/core.js */"./src/utils/core.js"),n=s(/*! ./utils/hub.js */"./src/utils/hub.js"),r=s(/*! ./utils/maths.js */"./src/utils/maths.js"),a=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),i=(s(/*! ./utils/image.js */"./src/utils/image.js"),s(/*! 
./utils/audio.js */"./src/utils/audio.js"));function l([e,t,s,o]){return[e-s/2,t-o/2,e+s/2,t+o/2]}function c(e,t=.5,s=null,o=!1){const n=e.logits,a=e.pred_boxes,[i,c,d]=n.dims;if(null!==s&&s.length!==i)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let u=[];for(let e=0;e<i;++e){let i=null!==s?s[e]:null,h={boxes:[],classes:[],scores:[]},p=n[e],_=a[e];for(let e=0;e<c;++e){let s,n=p[e],a=[];if(o){s=n.sigmoid().data;for(let e=0;e<s.length;++e)s[e]>t&&a.push(e)}else{let e=(0,r.max)(n.data)[1];if(e===d-1)continue;a.push(e),s=(0,r.softmax)(n.data)}for(const t of a){let o=_[e].data;o=l(o),null!==i&&(o=o.map(((e,t)=>e*i[(t+1)%2]))),h.boxes.push(o),h.classes.push(t),h.scores.push(s[t])}}u.push(h)}return u}function d(e,t){if(!(e instanceof Float32Array||e instanceof Float64Array))throw new Error(`${t} expects input to be a Float32Array or a Float64Array, but got ${e?.constructor?.name??typeof e} instead. If using the feature extractor directly, remember to use \`read_audio(url, sampling_rate)\` to obtain the raw audio data of the file/url.`)}function u(e,t,s=0,o=null){const n=e/t;let a=(0,r.bankers_round)(n)*t;return null!==o&&a>o&&(a=Math.floor(n)*t),a<s&&(a=Math.ceil(n)*t),a}function h([e,t],s){return[Math.max(Math.floor(e/s),1)*s,Math.max(Math.floor(t/s),1)*s]}class p extends o.Callable{constructor(e){super(),this.config=e}}class _ extends 
p{constructor(e){super(e),this.image_mean=this.config.image_mean??this.config.mean,this.image_std=this.config.image_std??this.config.std,this.resample=this.config.resample??2,this.do_rescale=this.config.do_rescale??!0,this.rescale_factor=this.config.rescale_factor??1/255,this.do_normalize=this.config.do_normalize,this.do_resize=this.config.do_resize,this.do_thumbnail=this.config.do_thumbnail,this.size=this.config.size,this.size_divisibility=this.config.size_divisibility??this.config.size_divisor,this.do_center_crop=this.config.do_center_crop,this.crop_size=this.config.crop_size,this.do_convert_rgb=this.config.do_convert_rgb??!0,this.do_crop_margin=this.config.do_crop_margin,this.pad_size=this.config.pad_size,this.do_pad=this.config.do_pad,this.do_pad&&!this.pad_size&&this.size&&void 0!==this.size.width&&void 0!==this.size.height&&(this.pad_size=this.size)}async thumbnail(e,t,s=2){const o=e.height,n=e.width,r=t.height,a=t.width;let i=Math.min(o,r),l=Math.min(n,a);return i===o&&l===n?e:(o>n?l=Math.floor(n*i/o):n>o&&(i=Math.floor(o*l/n)),await e.resize(l,i,{resample:s}))}async crop_margin(e,t=200){const s=e.clone().grayscale(),o=(0,r.min)(s.data)[0],n=(0,r.max)(s.data)[0]-o;if(0===n)return e;const a=t/255;let i=s.width,l=s.height,c=0,d=0;for(let e=0;e<s.height;++e){const t=e*s.width;for(let r=0;r<s.width;++r)(s.data[t+r]-o)/n<a&&(i=Math.min(i,r),l=Math.min(l,e),c=Math.max(c,r),d=Math.max(d,e))}return e=await e.crop([i,l,c,d])}pad_image(e,t,s,{mode:n="constant",center:r=!1,constant_values:a=0}={}){const[i,l,c]=t;let d,u;if("number"==typeof s?(d=s,u=s):(d=s.width,u=s.height),d!==l||u!==i){const s=new Float32Array(d*u*c);if(Array.isArray(a))for(let e=0;e<s.length;++e)s[e]=a[e%c];else 0!==a&&s.fill(a);const[h,p]=r?[Math.floor((d-l)/2),Math.floor((u-i)/2)]:[0,0];for(let t=0;t<i;++t){const o=(t+p)*d,n=t*l;for(let t=0;t<l;++t){const r=(o+t+h)*c,a=(n+t)*c;for(let t=0;t<c;++t)s[r+t]=e[a+t]}}if("symmetric"===n){if(r)throw new Error("`center` padding is not supported when `mode` 
is set to `symmetric`.");const t=i-1,n=l-1;for(let r=0;r<u;++r){const a=r*d,u=(0,o.calculateReflectOffset)(r,t)*l;for(let t=0;t<d;++t){if(r<i&&t<l)continue;const d=(a+t)*c,h=(u+(0,o.calculateReflectOffset)(t,n))*c;for(let t=0;t<c;++t)s[d+t]=e[h+t]}}}e=s,t=[u,d,c]}return[e,t]}rescale(e){for(let t=0;t<e.length;++t)e[t]=this.rescale_factor*e[t]}get_resize_output_image_size(e,t){const[s,o]=e.size;let n,r;if(this.do_thumbnail){const{height:e,width:s}=t;n=Math.min(e,s)}else Number.isInteger(t)?(n=t,r=this.config.max_size??n):void 0!==t&&(n=t.shortest_edge,r=t.longest_edge);if(void 0!==n||void 0!==r){const e=void 0===n?1:Math.max(n/s,n/o),t=s*e,a=o*e,i=void 0===r?1:Math.min(r/t,r/a);let l=Math.floor(Number((t*i).toFixed(2))),c=Math.floor(Number((a*i).toFixed(2)));return void 0!==this.size_divisibility&&([l,c]=h([l,c],this.size_divisibility)),[l,c]}if(void 0!==t&&void 0!==t.width&&void 0!==t.height){let e=t.width,n=t.height;if(this.config.keep_aspect_ratio&&this.config.ensure_multiple_of){let t=n/o,r=e/s;Math.abs(1-r)<Math.abs(1-t)?t=r:r=t,n=u(t*o,this.config.ensure_multiple_of),e=u(r*s,this.config.ensure_multiple_of)}return[e,n]}if(void 0!==this.size_divisibility)return h([s,o],this.size_divisibility);throw new Error(`Could not resize image due to unsupported \`this.size\` option in config: ${JSON.stringify(t)}`)}async resize(e){const[t,s]=this.get_resize_output_image_size(e,this.size);return await e.resize(t,s,{resample:this.resample})}async preprocess(e,{do_normalize:t=null,do_pad:s=null,do_convert_rgb:o=null,do_convert_grayscale:n=null}={}){this.do_crop_margin&&(e=await this.crop_margin(e));const[r,i]=e.size;if(o??this.do_convert_rgb?e=e.rgb():n&&(e=e.grayscale()),this.do_resize&&(e=await this.resize(e)),this.do_thumbnail&&(e=await this.thumbnail(e,this.size,this.resample)),this.do_center_crop){let t,s;Number.isInteger(this.crop_size)?(t=this.crop_size,s=this.crop_size):(t=this.crop_size.width,s=this.crop_size.height),e=await e.center_crop(t,s)}const 
l=[e.height,e.width];let c=Float32Array.from(e.data),d=[e.height,e.width,e.channels];if(this.do_rescale&&this.rescale(c),t??this.do_normalize){let t=this.image_mean;Array.isArray(this.image_mean)||(t=new Array(e.channels).fill(t));let s=this.image_std;if(Array.isArray(this.image_std)||(s=new Array(e.channels).fill(t)),t.length!==e.channels||s.length!==e.channels)throw new Error(`When set to arrays, the length of \`image_mean\` (${t.length}) and \`image_std\` (${s.length}) must match the number of channels in the image (${e.channels}).`);for(let o=0;o<c.length;o+=e.channels)for(let n=0;n<e.channels;++n)c[o+n]=(c[o+n]-t[n])/s[n]}if(s??this.do_pad)if(this.pad_size){const t=this.pad_image(c,[e.height,e.width,e.channels],this.pad_size);[c,d]=t}else if(this.size_divisibility){const[e,t]=h([d[1],d[0]],this.size_divisibility);[c,d]=this.pad_image(c,d,{width:e,height:t})}return{original_size:[i,r],reshaped_input_size:l,pixel_values:new a.Tensor("float32",c,d).permute(2,0,1)}}async _call(e,...t){Array.isArray(e)||(e=[e]);const s=await Promise.all(e.map((e=>this.preprocess(e))));return{pixel_values:(0,a.stack)(s.map((e=>e.pixel_values)),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}}class m extends _{post_process_semantic_segmentation(e,t=null){const s=e.logits,o=s.dims[0];if(null!==t&&t.length!==o)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");const n=[];for(let e=0;e<o;++e){const o=null!==t?t[e]:null;let r=s[e];null!==o&&(r=(0,a.interpolate)(r,o,"bilinear",!1));const[i,l]=o??r.dims.slice(-2),c=new a.Tensor("int32",new Int32Array(i*l),[i,l]),d=r[0].data;for(let e=1;e<r.dims[0];++e){const t=r[e].data;for(let s=0;s<t.length;++s)t[s]>d[s]&&(d[s]=t[s],c.data[s]=e)}const u=new Array(r.dims[0]),h=c.data;for(let e=0;e<h.length;++e){const t=h[e];u[t]=t}const p=u.filter((e=>void 0!==e));n.push({segmentation:c,labels:p})}return n}}class f extends _{}class g extends f{}class M 
extends _{}class w extends _{}class T extends _{}class k extends _{}class b extends _{}class x extends _{constructor(e){super(e),this.crop_pct=this.config.crop_pct??.875}async resize(e){const t=this.size?.shortest_edge;if(void 0===t)throw new Error("Size dictionary must contain 'shortest_edge' key.");if(t<384){const s=Math.floor(t/this.crop_pct),[o,n]=this.get_resize_output_image_size(e,{shortest_edge:s});e=await e.resize(o,n,{resample:this.resample}),e=await e.center_crop(t,t)}else e=await e.resize(t,t,{resample:this.resample});return e}}class y extends x{}class F extends _{}class C extends _{}class P extends _{constructor(e){super(e),this.include_top=this.config.include_top??!0,this.include_top&&(this.image_std=this.image_std.map((e=>e*e)))}}class v extends _{}class S extends _{post_process_object_detection(...e){return c(...e)}}class A extends S{}class L extends _{}class E extends _{}class z extends _{pad_image(e,t,s,o={}){const[n,r,a]=t;let i=this.image_mean;Array.isArray(this.image_mean)||(i=new Array(a).fill(i));let l=this.image_std;Array.isArray(l)||(l=new Array(a).fill(i));const c=i.map(((e,t)=>-e/l[t]));return super.pad_image(e,t,s,{center:!0,constant_values:c,...o})}}class B extends z{}class I extends _{async _call(e){const t=await super._call(e),s=[t.pixel_values.dims[0],64,64],o=new a.Tensor("int64",new BigInt64Array(s.reduce(((e,t)=>e*t))).fill(1n),s);return{...t,pixel_mask:o}}post_process_object_detection(...e){return c(...e)}remove_low_and_no_objects(e,t,s,o){let n=[],a=[],i=[];for(let l=0;l<e.dims[0];++l){let c=e[l],d=t[l],u=(0,r.max)(c.data)[1];if(u===o)continue;let h=(0,r.softmax)(c.data)[u];h>s&&(n.push(d),a.push(h),i.push(u))}return[n,a,i]}check_segment_validity(e,t,s,o=.5,n=.8){let r=[],a=0,i=0;for(let n=0;n<e.length;++n)e[n]===s&&(r.push(n),++a),t[s].data[n]>=o&&++i;let l=a>0&&i>0;if(l){l=a/i>n}return[l,r]}compute_segments(e,t,s,o,n,r=null,i=null){let[l,c]=i??e[0].dims,d=new a.Tensor("int32",new Int32Array(l*c),[l,c]),u=[];if(null!==i)for(let 
t=0;t<e.length;++t)e[t]=(0,a.interpolate)(e[t],i,"bilinear",!1);let h=new Int32Array(e[0].data.length),p=new Float32Array(e[0].data.length);for(let s=0;s<e.length;++s){let o=t[s];for(let t=0;t<e[s].data.length;++t)e[s].data[t]*=o,e[s].data[t]>p[t]&&(h[t]=s,p[t]=e[s].data[t])}let _=0;for(let r=0;r<s.length;++r){let a=s[r],[i,l]=this.check_segment_validity(h,e,r,o,n);if(i){++_;for(let e of l)d.data[e]=_;u.push({id:_,label_id:a,score:t[r]})}}return[d,u]}post_process_panoptic_segmentation(e,t=.5,s=.5,o=.8,n=null,r=null){null===n&&(console.warn("`label_ids_to_fuse` unset. No instance will be fused."),n=new Set);const i=e.logits,l=e.pred_masks.sigmoid();let[c,d,u]=i.dims;if(u-=1,null!==r&&r.length!==c)throw Error("Make sure that you pass in as many target sizes as the batch dimension of the logits");let h=[];for(let e=0;e<c;++e){let c=null!==r?r[e]:null,d=i[e],p=l[e],[_,m,f]=this.remove_low_and_no_objects(d,p,t,u);if(0===f.length){let[e,t]=c??p.dims.slice(-2),s=new a.Tensor("int32",new Int32Array(e*t).fill(-1),[e,t]);h.push({segmentation:s,segments_info:[]});continue}let[g,M]=this.compute_segments(_,m,f,s,o,n,c);h.push({segmentation:g,segments_info:M})}return h}post_process_instance_segmentation(){throw Error("Not implemented yet")}}class O extends _{post_process_object_detection(...e){return c(...e)}}class D extends _{reshape_input_points(e,t,s){e=structuredClone(e);let n=(0,o.calculateDimensions)(e);if(3===n.length)n=[1,...n],e=[e];else if(4!==n.length)throw Error("The input_points must be a 4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.");for(let o=0;o<e.length;++o){let n=t[o],r=s[o],a=[r[0]/n[0],r[1]/n[1]];for(let t=0;t<e[o].length;++t)for(let s=0;s<e[o][t].length;++s)for(let n=0;n<e[o][t][s].length;++n)e[o][t][s][n]*=a[n]}return new a.Tensor("float32",Float32Array.from(e.flat(1/0)),n)}add_input_labels(e,t){let s=(0,o.calculateDimensions)(e);if(2===s.length)s=[1,...s],e=[e];else if(3!==s.length)throw Error("The input_points must be a 
4D tensor of shape `batch_size`, `point_batch_size`, `nb_points_per_image`, `2`.");if(s.some(((e,s)=>e!==t.dims[s])))throw Error(`The first ${s.length} dimensions of 'input_points' and 'input_labels' must be the same.`);return new a.Tensor("int64",e.flat(1/0).map(BigInt),s)}async _call(e,t=null,s=null){const o=await super._call(e);if(t&&(o.input_points=this.reshape_input_points(t,o.original_sizes,o.reshaped_input_sizes)),s){if(!o.input_points)throw Error("`input_points` must be provided if `input_labels` are provided.");o.input_labels=this.add_input_labels(s,o.input_points)}return o}post_process_masks(e,t,s,{mask_threshold:o=0,binarize:n=!0,pad_size:r=null}={}){const i=[],l=[(r=r??this.pad_size).height,r.width];for(let r=0;r<t.length;++r){const c=t[r],d=s[r],u=e[r],h=[];for(let e=0;e<u.dims[0];++e){const t=u[e];let s=(0,a.interpolate)(t,l,"bilinear",!1);if(s=s.slice(null,[0,d[0]],[0,d[1]]),s=(0,a.interpolate)(s,c,"bilinear",!1),n){const e=new Uint8Array(s.data.length);for(let t=0;t<s.data.length;++t)s.data[t]>o&&(e[t]=1);s=new a.Tensor("bool",e,s.dims)}h.push(s)}i.push((0,a.stack)(h))}return i}}class N extends _{pad_image(e,t,s,o={}){const[n,r,a]=t;return super.pad_image(e,t,{width:r+(s-r%s)%s,height:n+(s-n%s)%s},{mode:"symmetric",center:!1,constant_values:-1,...o})}}class V extends _{async _call(e,t){Array.isArray(e)||(e=[e]),Array.isArray(t)||(t=[t]);const s=await Promise.all(e.map((e=>this.preprocess(e)))),o=await Promise.all(t.map((e=>this.preprocess(e,{do_normalize:!1,do_convert_rgb:!1,do_convert_grayscale:!0}))));return{pixel_values:(0,a.stack)(s.map(((e,t)=>(0,a.cat)([e.pixel_values,o[t].pixel_values],0))),0),original_sizes:s.map((e=>e.original_size)),reshaped_input_sizes:s.map((e=>e.reshaped_input_size))}}}class q extends 
// Body of class q (WhisperFeatureExtractor, per the class-mapping table later in this
// module): builds an 80-ish mel filter bank (0–8 kHz, "slaney") and a Hann window from
// the config; _extract_fbank_features computes a log10 mel spectrogram and clamps it to
// max-8 dB before scaling; _call pads/truncates audio to n_samples (warning on >30 s).
p{constructor(e){super(e),this.config.mel_filters??=(0,i.mel_filter_bank)(Math.floor(1+this.config.n_fft/2),this.config.feature_size,0,8e3,this.config.sampling_rate,"slaney","slaney"),this.window=(0,i.window_function)(this.config.n_fft,"hann")}_extract_fbank_features(e){const{data:t,dims:s}=(0,i.spectrogram)(e,this.window,this.config.n_fft,this.config.hop_length,{power:2,mel_filters:this.config.mel_filters,log_mel:"log10",max_num_frames:this.config.nb_max_frames}),o=(0,r.max)(t)[0];for(let e=0;e<t.length;++e)t[e]=(Math.max(t[e],o-8)+4)/4;return{data:t,dims:s}}async _call(e){let t;d(e,"WhisperFeatureExtractor"),e.length>this.config.n_samples?(console.warn("Attempting to extract features for audio longer than 30 seconds. If using a pipeline to extract transcript from a long audio clip, remember to specify `chunk_length_s` and/or `stride_length_s`."),t=e.slice(0,this.config.n_samples)):(t=new Float32Array(this.config.n_samples),t.set(e));const{data:s,dims:o}=this._extract_fbank_features(t);return{input_features:new a.Tensor("float32",s,[1,...o])}}}
// class j — registered as Wav2Vec2FeatureExtractor: optional zero-mean/unit-variance
// normalization of the raw waveform, plus an all-ones int64 attention mask.
class j extends p{_zero_mean_unit_var_norm(e){const t=e.reduce(((e,t)=>e+t),0)/e.length,s=e.reduce(((e,s)=>e+(s-t)**2),0)/e.length;return e.map((e=>(e-t)/Math.sqrt(s+1e-7)))}async _call(e){d(e,"Wav2Vec2FeatureExtractor"),e instanceof Float64Array&&(e=new Float32Array(e));let t=e;this.config.do_normalize&&(t=this._zero_mean_unit_var_norm(t));const s=[1,t.length];return{input_values:new a.Tensor("float32",t,s),attention_mask:new a.Tensor("int64",new BigInt64Array(t.length).fill(1n),s)}}}
// class R — registered as SeamlessM4TFeatureExtractor: kaldi-style mel filter bank with
// povey window; _call optionally normalizes each mel bin to zero mean / unit variance,
// pads frame count to a multiple of pad_to_multiple_of, then folds `stride` consecutive
// frames into the feature dimension via view(), with a matching attention mask.
class R extends p{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,i.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,i.window_function)(400,"povey",{periodic:!1})}_extract_fbank_features(e,t){return e=e.map((e=>32768*e)),(0,i.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e,{padding:t=!0,pad_to_multiple_of:s=2,do_normalize_per_mel_bins:o=!0,return_attention_mask:n=!0}={}){d(e,"SeamlessM4TFeatureExtractor");let r,i=this._extract_fbank_features(e,this.config.max_length);if(o){const[e,t]=i.dims;for(let s=0;s<t;++s){let o=0;for(let n=0;n<e;++n)o+=i.data[n*t+s];const n=o/e;let r=0;for(let o=0;o<e;++o)r+=(i.data[o*t+s]-n)**2;r/=e-1;const a=Math.sqrt(r+1e-7);for(let o=0;o<e;++o){const e=o*t+s;i.data[e]=(i.data[e]-n)/a}}}if(t){const[e,t]=i.dims,o=e%s;if(o>0){const s=new Float32Array(t*(e+o));s.set(i.data),s.fill(this.config.padding_value,i.data.length);const l=e+o;i={data:s,dims:[l,t]},n&&(r=new a.Tensor("int64",new BigInt64Array(l),[1,l]),r.data.fill(1n,0,e))}}const[l,c]=i.dims,u=this.config.stride;if(0!==l%u)throw new Error(`The number of frames (${l}) must be a multiple of the stride (${u}).`);const h=new a.Tensor("float32",i.data,i.dims).view(1,Math.floor(l/u),c*u),p={input_features:h};if(n){const e=h.dims[1],t=new a.Tensor("int64",new BigInt64Array(e),[1,e]);if(r)for(let e=1,s=0;e<l;e+=u,++s)t.data[s]=r.data[e];else t.data.fill(1n);p.attention_mask=t}return p}}
// class G — registered as ASTFeatureExtractor: same kaldi filter-bank setup as R but
// with a Hann window; _call normalizes features with config mean/std (divisor 2*std).
class G extends p{constructor(e){super(e);const t=this.config.sampling_rate,s=(0,i.mel_filter_bank)(256,this.config.num_mel_bins,20,Math.floor(t/2),t,null,"kaldi",!0);for(let e=0;e<s.length;++e)s[e].push(0);this.mel_filters=s,this.window=(0,i.window_function)(400,"hann",{periodic:!1}),this.mean=this.config.mean,this.std=this.config.std}_extract_fbank_features(e,t){return(0,i.spectrogram)(e,this.window,400,160,{fft_length:512,power:2,center:!1,preemphasis:.97,mel_filters:this.mel_filters,log_mel:"log",mel_floor:1.192092955078125e-7,remove_dc_offset:!0,max_num_frames:t,transpose:!0})}async _call(e){d(e,"ASTFeatureExtractor");const t=this._extract_fbank_features(e,this.config.max_length);if(this.config.do_normalize){const e=2*this.std;for(let s=0;s<t.data.length;++s)t.data[s]=(t.data[s]-this.mean)/e}return{input_values:new a.Tensor("float32",t.data,[1,...t.dims])}}}
// class W — registered as ClapFeatureExtractor: builds htk and slaney mel filter banks;
// _get_input_mel truncates long audio ("rand_trunc": random window; "fusion" not
// implemented) or pads short audio ("repeat"/"repeatpad"), returning a dB mel
// spectrogram plus a `longer` flag.
class W extends p{constructor(e){super(e),this.mel_filters=(0,i.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,null,"htk"),this.mel_filters_slaney=(0,i.mel_filter_bank)(this.config.nb_frequency_bins,this.config.feature_size,this.config.frequency_min,this.config.frequency_max,this.config.sampling_rate,"slaney","slaney"),this.window=(0,i.window_function)(this.config.fft_window_size,"hann")}_get_input_mel(e,t,s,o){let n,r=!1;const a=e.length-t;if(a>0){if("rand_trunc"!==s)throw new Error(`Truncation strategy "${s}" not implemented`);{r=!0;const s=Math.floor(Math.random()*(a+1));e=e.subarray(s,s+t),n=this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples),n.dims=[1,...n.dims]}}else{if(a<0){let s=new Float64Array(t);if(s.set(e),"repeat"===o)for(let o=e.length;o<t;o+=e.length)s.set(e.subarray(0,Math.min(e.length,t-o)),o);else if("repeatpad"===o)for(let t=e.length;t<-a;t+=e.length)s.set(e,t);e=s}if("fusion"===s)throw new Error(`Truncation strategy "${s}" not implemented`);n=this._extract_fbank_features(e,this.mel_filters_slaney,this.config.nb_max_samples),n.dims=[1,...n.dims]}return{...n,longer:r}}_extract_fbank_features(e,t,s=null){return(0,i.spectrogram)(e,this.window,this.config.fft_window_size,this.config.hop_length,{power:2,mel_filters:t,log_mel:"dB",max_num_frames:s,do_pad:!1,transpose:!0})}async _call(e,{max_length:t=null}={}){d(e,"ClapFeatureExtractor");const s=this._get_input_mel(e,t??this.config.nb_max_samples,this.config.truncation,this.config.padding);return{input_features:new a.Tensor("float32",s.data,[1,...s.dims])}}}
// class $ — registered as SpeechT5FeatureExtractor (inherits everything from p).
// class U (header continues on the next line) — base Processor wrapper.
class $ extends p{}class U extends 
// Body of class U — generic Processor: wraps a feature extractor and forwards calls.
o.Callable{constructor(e){super(),this.feature_extractor=e}async _call(e,...t){return await this.feature_extractor(e,...t)}}
// Processor subclasses, named in PROCESSOR_CLASS_MAPPING below: X=SamProcessor (also
// forwards post_process_masks / reshape_input_points), Q=WhisperProcessor,
// H=Wav2Vec2ProcessorWithLM, Y=SpeechT5Processor, J=OwlViTProcessor.
class X extends U{async _call(...e){return await this.feature_extractor(...e)}post_process_masks(...e){return this.feature_extractor.post_process_masks(...e)}reshape_input_points(...e){return this.feature_extractor.reshape_input_points(...e)}}class Q extends U{async _call(e){return await this.feature_extractor(e)}}class H extends U{async _call(e){return await this.feature_extractor(e)}}class Y extends U{async _call(e){return await this.feature_extractor(e)}}class J extends U{}
// class Z — AutoProcessor-style factory: loads preprocessor_config.json, resolves the
// feature-extractor class from feature_extractor_type/image_processor_type (falling
// back to the generic image extractor `_` when a `size` key is present), then wraps it
// in the processor class named by processor_class (default: the base Processor U).
class Z{static FEATURE_EXTRACTOR_CLASS_MAPPING={ImageFeatureExtractor:_,WhisperFeatureExtractor:q,ViTFeatureExtractor:F,MobileViTFeatureExtractor:v,OwlViTFeatureExtractor:S,Owlv2ImageProcessor:A,CLIPFeatureExtractor:T,ChineseCLIPFeatureExtractor:k,SiglipImageProcessor:b,ConvNextFeatureExtractor:x,ConvNextImageProcessor:y,SegformerFeatureExtractor:m,BitImageProcessor:M,DPTImageProcessor:g,DPTFeatureExtractor:f,GLPNFeatureExtractor:w,BeitFeatureExtractor:E,DeiTFeatureExtractor:L,DetrFeatureExtractor:I,YolosFeatureExtractor:O,DonutFeatureExtractor:z,NougatImageProcessor:B,EfficientNetImageProcessor:P,ViTImageProcessor:C,VitMatteImageProcessor:V,SamImageProcessor:D,Swin2SRImageProcessor:N,Wav2Vec2FeatureExtractor:j,SeamlessM4TFeatureExtractor:R,SpeechT5FeatureExtractor:$,ASTFeatureExtractor:G,ClapFeatureExtractor:W};static PROCESSOR_CLASS_MAPPING={WhisperProcessor:Q,Wav2Vec2ProcessorWithLM:H,SamProcessor:X,SpeechT5Processor:Y,OwlViTProcessor:J};static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:r=!1,revision:a="main"}={}){let i=s??await(0,n.getModelJSON)(e,"preprocessor_config.json",!0,{progress_callback:t,config:s,cache_dir:o,local_files_only:r,revision:a}),l=i.feature_extractor_type??i.image_processor_type,c=this.FEATURE_EXTRACTOR_CLASS_MAPPING[l];if(!c){if(void 0===i.size)throw new Error(`Unknown Feature Extractor type: ${l}`);console.warn(`Feature extractor type "${l}" not found, assuming ImageFeatureExtractor due to size parameter in config.`),c=_}return new(this.PROCESSOR_CLASS_MAPPING[i.processor_class]??U)(new c(i))}}},"./src/tokenizers.js":
// Webpack module entry for ./src/tokenizers.js: s.r marks the exports object as an ES
// module; s.d wires the public export names to the minified class identifiers used
// below (e.g. PreTrainedTokenizer=>_e, TokenizerModel=>M, WhisperTokenizer=>Ze).
// The trailing s(...) calls bind the module-local aliases o=utils/core, n=utils/hub,
// r=utils/maths, a=utils/tensor, i=utils/data-structures, l=@huggingface/jinja.
/*!***************************!*\
!*** ./src/tokenizers.js ***!
\***************************/(e,t,s)=>{s.r(t),s.d(t,{AlbertTokenizer:()=>fe,AutoTokenizer:()=>dt,BartTokenizer:()=>Ae,BertTokenizer:()=>me,BlenderbotSmallTokenizer:()=>rt,BlenderbotTokenizer:()=>nt,BloomTokenizer:()=>Be,CLIPTokenizer:()=>et,CamembertTokenizer:()=>Fe,CodeGenTokenizer:()=>Ke,CodeLlamaTokenizer:()=>De,CohereTokenizer:()=>ct,ConvBertTokenizer:()=>be,DebertaTokenizer:()=>we,DebertaV2Tokenizer:()=>Te,DistilBertTokenizer:()=>ye,ElectraTokenizer:()=>Pe,EsmTokenizer:()=>Re,FalconTokenizer:()=>qe,GPT2Tokenizer:()=>Se,GPTNeoXTokenizer:()=>je,GemmaTokenizer:()=>We,Grok1Tokenizer:()=>$e,HerbertTokenizer:()=>ke,LlamaTokenizer:()=>Oe,M2M100Tokenizer:()=>Qe,MBart50Tokenizer:()=>Ee,MBartTokenizer:()=>Le,MPNetTokenizer:()=>Ve,MarianTokenizer:()=>st,MobileBertTokenizer:()=>ge,NllbTokenizer:()=>Xe,NougatTokenizer:()=>it,PreTrainedTokenizer:()=>_e,Qwen2Tokenizer:()=>Ge,RoFormerTokenizer:()=>xe,RobertaTokenizer:()=>ze,SiglipTokenizer:()=>tt,SpeechT5Tokenizer:()=>at,SqueezeBertTokenizer:()=>Me,T5Tokenizer:()=>ve,TokenizerModel:()=>M,VitsTokenizer:()=>lt,Wav2Vec2CTCTokenizer:()=>ot,WhisperTokenizer:()=>Ze,XLMRobertaTokenizer:()=>Ne,XLMTokenizer:()=>Ce});var o=s(/*! ./utils/core.js */"./src/utils/core.js"),n=s(/*! ./utils/hub.js */"./src/utils/hub.js"),r=s(/*! ./utils/maths.js */"./src/utils/maths.js"),a=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),i=s(/*! ./utils/data-structures.js */"./src/utils/data-structures.js"),l=s(/*! 
@huggingface/jinja */"./node_modules/@huggingface/jinja/dist/index.js");
// Module-local helpers: c loads tokenizer.json + tokenizer_config.json in parallel
// (propagating an explicit `legacy` option into the config); d compiles a pattern
// config ({Regex} or {String}) into a global-unicode RegExp, applying the `f`
// substitution map; u builds a Map from a plain vocab object; h unwraps a 1-2D Tensor
// into a plain list (rejecting batch size != 1); p is clean_up_tokenization_spaces;
// _ strips combining diacritics (assumes prior NFD-style decomposition — the callers
// below pair it with lowercasing).
async function c(e,t){const s=await Promise.all([(0,n.getModelJSON)(e,"tokenizer.json",!0,t),(0,n.getModelJSON)(e,"tokenizer_config.json",!0,t)]);return null!==t.legacy&&(s[1].legacy=t.legacy),s}function d(e,t=!0){if(void 0!==e.Regex){let t=e.Regex.replace(/\\([#&~])/g,"$1");for(const[e,s]of f)t=t.replaceAll(e,s);return new RegExp(t,"gu")}if(void 0!==e.String){const s=(0,o.escapeRegExp)(e.String);return new RegExp(t?s:`(${s})`,"gu")}return console.warn("Unknown pattern type:",e),null}function u(e){return new Map(Object.entries(e))}function h(e){const t=e.dims;switch(t.length){case 1:return e.tolist();case 2:if(1!==t[0])throw new Error("Unable to decode tensor with `batch size !== 1`. Use `tokenizer.batch_decode(...)` for batched inputs.");return e.tolist()[0];default:throw new Error(`Expected tensor to have 1-2 dimensions, got ${t.length}.`)}}function p(e){return e.replace(/ \./g,".").replace(/ \?/g,"?").replace(/ \!/g,"!").replace(/ ,/g,",").replace(/ \' /g,"'").replace(/ n\'t/g,"n't").replace(/ \'m/g,"'m").replace(/ \'s/g,"'s").replace(/ \'ve/g,"'ve").replace(/ \'re/g,"'re")}function _(e){return e.replace(/[\u0300-\u036f]/g,"")}
// m: punctuation character-class fragment reused by several pre-tokenizer regexes.
// f: Regex-string rewrites applied by d (case-insensitive contraction group).
const m="\\p{P}\\u0021-\\u002F\\u003A-\\u0040\\u005B-\\u0060\\u007B-\\u007E",f=new Map([["(?i:'s|'t|'re|'ve|'m|'ll|'d)","(?:'([sS]|[tT]|[rR][eE]|[vV][eE]|[mM]|[lL][lL]|[dD]))"]]);
// class g — AddedToken config wrapper (content/id plus lstrip/rstrip/special flags).
class g{constructor(e){this.content=e.content,this.id=e.id,this.single_word=e.single_word??!1,this.lstrip=e.lstrip??!1,this.rstrip=e.rstrip??!1,this.special=e.special??!1,this.normalized=e.normalized??null}}
// class M — exported as TokenizerModel: base class holding vocab / tokens_to_ids /
// unk handling; fromConfig dispatches on config.type ("WordPiece" -> w, "Unigram" -> T,
// "BPE" -> x, otherwise the vocab-only legacy model y); _call optionally fuses runs of
// unknown-token ids when fuse_unk is set.
class M extends o.Callable{constructor(e){super(),this.config=e,this.vocab=[],this.tokens_to_ids=new Map,this.unk_token_id=void 0,this.unk_token=void 0,this.end_of_word_suffix=void 0,this.fuse_unk=this.config.fuse_unk??!1}static fromConfig(e,...t){switch(e.type){case"WordPiece":return new w(e);case"Unigram":return new T(e,...t);case"BPE":return new x(e);default:if(e.vocab)return new y(e,...t);throw new Error(`Unknown TokenizerModel type: ${e.type}`)}}_call(e){let t=this.encode(e);return this.fuse_unk&&(t=function(e,t,s){const o=[];let n=0;for(;n<e.length;)if(o.push(e[n]),(s.get(e[n])??t)===t)for(;n<e.length&&(s.get(e[n])??t)===t;)++n;else++n;return o}(t,this.unk_token_id,this.tokens_to_ids)),t}encode(e){throw Error("encode should be implemented in subclass.")}convert_tokens_to_ids(e){return e.map((e=>this.tokens_to_ids.get(e)??this.unk_token_id))}convert_ids_to_tokens(e){return e.map((e=>this.vocab[e]??this.unk_token))}}
// class w — the "WordPiece" model: greedy longest-match-first subword segmentation
// with a continuing_subword_prefix; words over max_input_chars_per_word become <unk>.
class w extends M{constructor(e){super(e),this.tokens_to_ids=u(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.max_input_chars_per_word=e.max_input_chars_per_word??100,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){const t=[];for(const s of e){const e=[...s];if(e.length>this.max_input_chars_per_word){t.push(this.unk_token);continue}let o=!1,n=0;const r=[];for(;n<e.length;){let t=e.length,s=null;for(;n<t;){let o=e.slice(n,t).join("");if(n>0&&(o=this.config.continuing_subword_prefix+o),this.tokens_to_ids.has(o)){s=o;break}--t}if(null===s){o=!0;break}r.push(s),n=t}o?t.push(this.unk_token):t.push(...r)}return t}}
// class T — the "Unigram" model: score table + CharTrie; tokenization runs a Viterbi
// search over a TokenLattice (populateNodes continues on the next line).
class T extends M{constructor(e,t){super(e);const s=e.vocab.length;this.vocab=new Array(s),this.scores=new Array(s);for(let t=0;t<s;++t){const s=e.vocab[t];this.vocab[t]=s[0],this.scores[t]=s[1]}this.unk_token_id=e.unk_id,this.unk_token=this.vocab[e.unk_id],this.tokens_to_ids=new Map(this.vocab.map(((e,t)=>[e,t]))),this.bosToken=" ",this.bosTokenId=this.tokens_to_ids.get(this.bosToken),this.eosToken=t.eos_token,this.eosTokenId=this.tokens_to_ids.get(this.eosToken),this.unkToken=this.vocab[this.unk_token_id],this.minScore=(0,r.min)(this.scores)[0],this.unkScore=this.minScore-10,this.scores[this.unk_token_id]=this.unkScore,this.trie=new i.CharTrie,this.trie.extend(this.vocab),this.fuse_unk=!0}populateNodes(e){const 
// NOTE(review): continues class T (Unigram) from the previous line — populateNodes
// inserts every trie prefix match (falling back to unkScore) into the lattice;
// tokenize/encode run the lattice per pre-tokenized word.
t=e.sentence,s=t.length;let o=0;for(;o<s;){const s=1;let n=!1;const r=[];for(let a of this.trie.commonPrefixSearch(t.slice(o))){r.push(a);const t=this.tokens_to_ids.get(a),i=this.scores[t],l=a.length;e.insert(o,l,i,t),n||l!==s||(n=!0)}n||e.insert(o,s,this.unkScore,this.unk_token_id),o+=s}}tokenize(e){const t=new i.TokenLattice(e,this.bosTokenId,this.eosTokenId);return this.populateNodes(t),t.tokens()}encode(e){const t=[];for(const s of e){const e=this.tokenize(s);t.push(...e)}return t}}
// k — GPT-2-style byte->printable-unicode map (remaps non-printable bytes to 256+);
// b — its inverse, used by the byte-level decoder.
const k=(()=>{const e=[...Array.from({length:"~".charCodeAt(0)-"!".charCodeAt(0)+1},((e,t)=>t+"!".charCodeAt(0))),...Array.from({length:"¬".charCodeAt(0)-"¡".charCodeAt(0)+1},((e,t)=>t+"¡".charCodeAt(0))),...Array.from({length:"ÿ".charCodeAt(0)-"®".charCodeAt(0)+1},((e,t)=>t+"®".charCodeAt(0)))],t=e.slice();let s=0;for(let o=0;o<256;++o)e.includes(o)||(e.push(o),t.push(256+s),s+=1);const o=t.map((e=>String.fromCharCode(e)));return Object.fromEntries(e.map(((e,t)=>[e,o[t]])))})(),b=(0,o.reverseDictionary)(k);
// class x — the "BPE" model: rank-ordered merges applied via a PriorityQueue over a
// doubly-linked token list (bias keeps ordering stable); results are memoized in
// this.cache; byte_fallback emits <0xNN> tokens for unknown pieces.
class x extends M{constructor(e){super(e),this.BPE_SPLIT_TOKEN=" ",this.tokens_to_ids=u(e.vocab),this.unk_token_id=this.tokens_to_ids.get(e.unk_token),this.unk_token=e.unk_token,this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e;this.bpe_ranks=new Map(e.merges.map(((e,t)=>[e,t]))),this.merges=e.merges.map((e=>e.split(this.BPE_SPLIT_TOKEN))),this.end_of_word_suffix=e.end_of_word_suffix,this.continuing_subword_suffix=e.continuing_subword_suffix??null,this.byte_fallback=this.config.byte_fallback??!1,this.byte_fallback&&(this.text_encoder=new TextEncoder),this.cache=new Map}bpe(e){if(0===e.length)return[];const t=this.cache.get(e);if(void 0!==t)return t;const s=Array.from(e);this.end_of_word_suffix&&(s[s.length-1]+=this.end_of_word_suffix);let o=[];if(s.length>1){const e=new i.PriorityQueue(((e,t)=>e.score<t.score));let t={token:s[0],bias:0,prev:null,next:null},n=t;for(let t=1;t<s.length;++t){const o={bias:t/s.length,token:s[t],prev:n,next:null};n.next=o,this._add_node(e,n),n=o}for(;!e.isEmpty();){const s=e.pop();if(s.deleted||!s.next||s.next.deleted)continue;if(s.deleted=!0,s.next.deleted=!0,s.prev){const e={...s.prev};s.prev.deleted=!0,s.prev=e,e.prev?e.prev.next=e:t=e}const o={token:s.token+s.next.token,bias:s.bias,prev:s.prev,next:s.next.next};o.prev?(o.prev.next=o,this._add_node(e,o.prev)):t=o,o.next&&(o.next.prev=o,this._add_node(e,o))}for(let e=t;null!==e;e=e.next)o.push(e.token)}else o=s;if(this.continuing_subword_suffix)for(let e=0;e<o.length-1;++e)o[e]+=this.continuing_subword_suffix;return this.cache.set(e,o),o}_add_node(e,t){const s=this.bpe_ranks.get(t.token+this.BPE_SPLIT_TOKEN+t.next.token);void 0!==s&&(t.score=s+t.bias,e.push(t))}encode(e){const t=[];for(const s of e){const e=this.bpe(s);for(const s of e)this.tokens_to_ids.has(s)?t.push(s):this.byte_fallback?t.push(...Array.from(this.text_encoder.encode(s)).map((e=>`<0x${e.toString(16).toUpperCase().padStart(2,"0")}>`))):t.push(this.unk_token)}return t}}
// class y — legacy vocab-only model (the fromConfig default when only `vocab` exists):
// identity encode; supports per-target-language sub-vocabularies.
class y extends M{constructor(e,t){super(e),this.tokens_to_ids=u(t.target_lang?e.vocab[t.target_lang]:e.vocab),this.bos_token=t.bos_token,this.bos_token_id=this.tokens_to_ids.get(this.bos_token),this.eos_token=t.eos_token,this.eos_token_id=this.tokens_to_ids.get(this.eos_token),this.pad_token=t.pad_token,this.pad_token_id=this.tokens_to_ids.get(this.pad_token),this.unk_token=t.unk_token,this.unk_token_id=this.tokens_to_ids.get(this.unk_token),this.vocab=new Array(this.tokens_to_ids.size);for(const[e,t]of this.tokens_to_ids)this.vocab[t]=e}encode(e){return e}}
// class F — Normalizer base with config-type dispatch (BertNormalizer -> I,
// Precompiled -> ae, Sequence -> B, Replace -> C, NFC/NFKC/NFKD, Strip -> A,
// StripAccents -> L, Lowercase -> E, Prepend -> z).
class F extends o.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"BertNormalizer":return new I(e);case"Precompiled":return new ae(e);case"Sequence":return new B(e);case"Replace":return new C(e);case"NFC":return new P(e);case"NFKC":return new v(e);case"NFKD":return new S(e);case"Strip":return new A(e);case"StripAccents":return new L(e);case"Lowercase":return new E(e);case"Prepend":return new z(e);default:throw new Error(`Unknown Normalizer type: ${e.type}`)}}normalize(e){throw Error("normalize should be implemented in subclass.")}_call(e){return this.normalize(e)}}
// Simple normalizers: C=Replace(pattern,content), P/v/S=NFC/NFKC/NFKD, A=Strip,
// L=StripAccents (via _), E=Lowercase, z=Prepend, B=Sequence (left-to-right reduce).
class C extends F{normalize(e){const t=d(this.config.pattern);return null===t?e:e.replaceAll(t,this.config.content)}}class P extends F{normalize(e){return e=e.normalize("NFC")}}class v extends F{normalize(e){return e=e.normalize("NFKC")}}class S extends F{normalize(e){return e=e.normalize("NFKD")}}class A extends F{normalize(e){return this.config.strip_left&&this.config.strip_right?e=e.trim():(this.config.strip_left&&(e=e.trimStart()),this.config.strip_right&&(e=e.trimEnd())),e}}class L extends F{normalize(e){return e=_(e)}}class E extends F{normalize(e){return e=e.toLowerCase()}}class z extends F{normalize(e){return e=this.config.prepend+e}}class B extends F{constructor(e){super(e),this.normalizers=e.normalizers.map((e=>F.fromConfig(e)))}normalize(e){return this.normalizers.reduce(((e,t)=>t.normalize(e)),e)}}
// class I — BertNormalizer: control-char cleanup, CJK-char spacing (by code-point
// ranges), optional lowercasing with NFD accent stripping.
class I extends F{_tokenize_chinese_chars(e){const t=[];for(let s=0;s<e.length;++s){const o=e[s],n=o.charCodeAt(0);this._is_chinese_char(n)?(t.push(" "),t.push(o),t.push(" ")):t.push(o)}return t.join("")}_is_chinese_char(e){return e>=19968&&e<=40959||e>=13312&&e<=19903||e>=131072&&e<=173791||e>=173824&&e<=177983||e>=177984&&e<=178207||e>=178208&&e<=183983||e>=63744&&e<=64255||e>=194560&&e<=195103}stripAccents(e){return e.normalize("NFD").replace(/[\u0300-\u036f]/g,"")}_is_control(e){switch(e){case"\t":case"\n":case"\r":return!1;default:return/^\p{Cc}|\p{Cf}|\p{Co}|\p{Cs}$/u.test(e)}}_clean_text(e){const t=[];for(const s of e){const e=s.charCodeAt(0);0===e||65533===e||this._is_control(s)||(/^\s$/.test(s)?t.push(" "):t.push(s))}return t.join("")}normalize(e){return this.config.clean_text&&(e=this._clean_text(e)),this.config.handle_chinese_chars&&(e=this._tokenize_chinese_chars(e)),this.config.lowercase?(e=e.toLowerCase(),!1!==this.config.strip_accents&&(e=this.stripAccents(e))):this.config.strip_accents&&(e=this.stripAccents(e)),e}}
// class O — PreTokenizer base with config-type dispatch; pre_tokenize flattens either
// a single string or a list through pre_tokenize_text.
class O extends o.Callable{static fromConfig(e){if(null===e)return null;switch(e.type){case"BertPreTokenizer":return new D(e);case"Sequence":return new ie(e);case"Whitespace":return new le(e);case"WhitespaceSplit":return new ce(e);case"Metaspace":return new ne(e);case"ByteLevel":return new N(e);case"Split":return new V(e);case"Punctuation":return new q(e);case"Digits":return new j(e);case"Replace":return new de(e);default:throw new Error(`Unknown PreTokenizer type: ${e.type}`)}}pre_tokenize_text(e,t){throw Error("pre_tokenize_text should be implemented in subclass.")}pre_tokenize(e,t){return(Array.isArray(e)?e.map((e=>this.pre_tokenize_text(e,t))):this.pre_tokenize_text(e,t)).flat()}_call(e,t){return this.pre_tokenize(e,t)}}
// D=BertPreTokenizer (words/punctuation via the `m` class), N=ByteLevel (GPT-2 regex +
// byte->unicode encoding), V=Split (invert keeps matches; otherwise splits around
// matches via the inline helper).
class D extends O{constructor(e){super(),this.pattern=new RegExp(`[^\\s${m}]+|[${m}]`,"gu")}pre_tokenize_text(e,t){return e.trim().match(this.pattern)||[]}}class N extends O{constructor(e){super(),this.config=e,this.add_prefix_space=this.config.add_prefix_space,this.trim_offsets=this.config.trim_offsets,this.use_regex=this.config.use_regex??!0,this.pattern=/'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+/gu,this.byte_encoder=k,this.text_encoder=new TextEncoder}pre_tokenize_text(e,t){this.add_prefix_space&&!e.startsWith(" ")&&(e=" "+e);return(this.use_regex?e.match(this.pattern)||[]:[e]).map((e=>Array.from(this.text_encoder.encode(e),(e=>this.byte_encoder[e])).join("")))}}class V extends O{constructor(e){super(),this.config=e,this.pattern=d(this.config.pattern,this.config.invert)}pre_tokenize_text(e,t){return null===this.pattern?[]:this.config.invert?e.match(this.pattern)||[]:function(e,t){const s=[];let o=0;for(const n of e.matchAll(t)){const t=n[0];o<n.index&&s.push(e.slice(o,n.index)),t.length>0&&s.push(t),o=n.index+t.length}return o<e.length&&s.push(e.slice(o)),s}(e,this.pattern)}}
// q=Punctuation pre-tokenizer, j=Digits (individual_digits toggles per-digit split).
class q extends O{constructor(e){super(),this.config=e,this.pattern=new RegExp(`[^${m}]+|[${m}]+`,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}class j extends O{constructor(e){super(),this.config=e;const t="[^\\d]+|\\d"+(this.config.individual_digits?"":"+");this.pattern=new RegExp(t,"gu")}pre_tokenize_text(e,t){return e.match(this.pattern)||[]}}
// class R — PostProcessor base (TemplateProcessing -> $, ByteLevel -> U,
// RobertaProcessing -> W, BertProcessing -> G); G/W wrap sequences in cls/sep and emit
// token_type_ids; $ expands SpecialToken/Sequence template entries for single/pair.
class R extends o.Callable{constructor(e){super(),this.config=e}static fromConfig(e){if(null===e)return null;switch(e.type){case"TemplateProcessing":return new $(e);case"ByteLevel":return new U(e);case"RobertaProcessing":return new W(e);case"BertProcessing":return new G(e);default:throw new Error(`Unknown PostProcessor type: ${e.type}`)}}post_process(e,...t){throw Error("post_process should be implemented in subclass.")}_call(e,...t){return this.post_process(e,...t)}}class G extends R{constructor(e){super(e),this.cls=e.cls[0],this.sep=e.sep[0]}post_process(e,t=null,{add_special_tokens:s=!0}={}){s&&(e=(0,o.mergeArrays)([this.cls],e,[this.sep]));let n=new Array(e.length).fill(0);if(null!==t){const r=s&&this instanceof W?[this.sep]:[],a=s?[this.sep]:[];e=(0,o.mergeArrays)(e,r,t,a),n=(0,o.mergeArrays)(n,new Array(t.length+r.length+a.length).fill(1))}return{tokens:e,token_type_ids:n}}}class W extends G{}class $ extends R{constructor(e){super(e),this.single=e.single,this.pair=e.pair}post_process(e,t=null,{add_special_tokens:s=!0}={}){const n=null===t?this.single:this.pair;let r=[],a=[];for(const i of n)"SpecialToken"in i?s&&(r.push(i.SpecialToken.id),a.push(i.SpecialToken.type_id)):"Sequence"in i&&("A"===i.Sequence.id?(r=(0,o.mergeArrays)(r,e),a=(0,o.mergeArrays)(a,new Array(e.length).fill(i.Sequence.type_id))):"B"===i.Sequence.id&&(r=(0,o.mergeArrays)(r,t),a=(0,o.mergeArrays)(a,new Array(t.length).fill(i.Sequence.type_id))));return{tokens:r,token_type_ids:a}}}
// class U (header continues on the next line) — the "ByteLevel" post-processor.
class U 
// Body of class U — ByteLevel post-processor: concatenates the pair sequence only.
extends R{post_process(e,t=null){return t&&(e=(0,o.mergeArrays)(e,t)),{tokens:e}}}
// class X — Decoder base with config-type dispatch (WordPiece -> Z, Metaspace -> re,
// ByteLevel -> K, Replace -> Q, ByteFallback -> H, Fuse -> Y, Strip -> J,
// Sequence -> te, CTC -> ee, BPEDecoder -> se); decode joins decode_chain output.
class X extends o.Callable{constructor(e){super(),this.config=e,this.added_tokens=[],this.end_of_word_suffix=null,this.trim_offsets=e.trim_offsets}static fromConfig(e){if(null===e)return null;switch(e.type){case"WordPiece":return new Z(e);case"Metaspace":return new re(e);case"ByteLevel":return new K(e);case"Replace":return new Q(e);case"ByteFallback":return new H(e);case"Fuse":return new Y(e);case"Strip":return new J(e);case"Sequence":return new te(e);case"CTC":return new ee(e);case"BPEDecoder":return new se(e);default:throw new Error(`Unknown Decoder type: ${e.type}`)}}_call(e){return this.decode(e)}decode(e){return this.decode_chain(e).join("")}decode_chain(e){throw Error("`decode_chain` should be implemented in subclass.")}}
// Q=Replace decoder; H=ByteFallback (collects consecutive <0xNN> tokens and UTF-8
// decodes them); Y=Fuse (joins all tokens); J=Strip (trims up to start/stop copies of
// `content` at the edges); Z=WordPiece decoder (prefix handling + cleanup via p).
class Q extends X{decode_chain(e){const t=d(this.config.pattern);return null===t?e:e.map((e=>e.replaceAll(t,this.config.content)))}}class H extends X{constructor(e){super(e),this.text_decoder=new TextDecoder}decode_chain(e){const t=[];let s=[];for(const o of e){let e=null;if(6===o.length&&o.startsWith("<0x")&&o.endsWith(">")){const t=parseInt(o.slice(3,5),16);isNaN(t)||(e=t)}if(null!==e)s.push(e);else{if(s.length>0){const e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}t.push(o)}}if(s.length>0){const e=this.text_decoder.decode(Uint8Array.from(s));t.push(e),s=[]}return t}}class Y extends X{decode_chain(e){return[e.join("")]}}class J extends X{constructor(e){super(e),this.content=this.config.content,this.start=this.config.start,this.stop=this.config.stop}decode_chain(e){return e.map((e=>{let t=0;for(let s=0;s<this.start&&e[s]===this.content;++s)t=s+1;let s=e.length;for(let t=0;t<this.stop;++t){const o=e.length-t-1;if(e[o]!==this.content)break;s=o}return e.slice(t,s)}))}}class Z extends X{constructor(e){super(e),this.cleanup=e.cleanup}decode_chain(e){return e.map(((e,t)=>(0!==t&&(e=e.startsWith(this.config.prefix)?e.replace(this.config.prefix,""):" "+e),this.cleanup&&(e=p(e)),e)))}}
// K=ByteLevel decoder (inverse byte map b + TextDecoder; flushes around added tokens);
// ee=CTC (dedupe repeats, drop pad, optional cleanup); te=Sequence; se=BPEDecoder
// (suffix -> space); oe joins odd-indexed tokens only — presumably the VITS-style
// decoder (not in the fromConfig switch; verify against its user).
class K extends X{constructor(e){super(e),this.byte_decoder=b,this.text_decoder=new TextDecoder("utf-8",{fatal:!1,ignoreBOM:!0}),this.end_of_word_suffix=null}convert_tokens_to_string(e){const t=e.join(""),s=new Uint8Array([...t].map((e=>this.byte_decoder[e])));return this.text_decoder.decode(s)}decode_chain(e){const t=[];let s=[];for(const o of e)void 0!==this.added_tokens.find((e=>e.content===o))?(s.length>0&&(t.push(this.convert_tokens_to_string(s)),s=[]),t.push(o)):s.push(o);return s.length>0&&t.push(this.convert_tokens_to_string(s)),t}}class ee extends X{constructor(e){super(e),this.pad_token=this.config.pad_token,this.word_delimiter_token=this.config.word_delimiter_token,this.cleanup=this.config.cleanup}convert_tokens_to_string(e){if(0===e.length)return"";const t=[e[0]];for(let s=1;s<e.length;++s)e[s]!==t.at(-1)&&t.push(e[s]);let s=t.filter((e=>e!==this.pad_token)).join("");return this.cleanup&&(s=p(s).replaceAll(this.word_delimiter_token," ").trim()),s}decode_chain(e){return[this.convert_tokens_to_string(e)]}}class te extends X{constructor(e){super(e),this.decoders=e.decoders.map((e=>X.fromConfig(e)))}decode_chain(e){return this.decoders.reduce(((e,t)=>t.decode_chain(e)),e)}}class se extends X{constructor(e){super(e),this.suffix=this.config.suffix}decode_chain(e){return e.map(((t,s)=>t.replaceAll(this.suffix,s===e.length-1?"":" ")))}}class oe extends X{decode_chain(e){let t="";for(let s=1;s<e.length;s+=2)t+=e[s];return[t]}}
// ne=Metaspace pre-tokenizer (space -> replacement char, prepend_scheme
// always/first); re=Metaspace decoder (inverse mapping, strips the prefix space).
class ne extends O{constructor(e){super(),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement,this.strRep=e.str_rep||this.replacement,this.prepend_scheme=e.prepend_scheme??"always"}pre_tokenize_text(e,{section_index:t}={}){let s=e.replaceAll(" ",this.strRep);return this.addPrefixSpace&&!s.startsWith(this.replacement)&&("always"===this.prepend_scheme||"first"===this.prepend_scheme&&0===t)&&(s=this.strRep+s),[s]}}class re extends X{constructor(e){super(e),this.addPrefixSpace=e.add_prefix_space,this.replacement=e.replacement}decode_chain(e){const t=[];for(let s=0;s<e.length;++s){let o=e[s].replaceAll(this.replacement," ");this.addPrefixSpace&&0==s&&o.startsWith(" ")&&(o=o.substring(1)),t.push(o)}return t}}
// ae=Precompiled normalizer (control-char strip, whitespace folding, NFKC with the
// "~" ranges preserved); ie=Sequence pre-tokenizer; le=Whitespace; ce=WhitespaceSplit;
// de=Replace pre-tokenizer.
class ae extends F{constructor(e){super(e),this.charsmap=e.precompiled_charsmap}normalize(e){if((e=(e=e.replace(/[\u0001-\u0008\u000B\u000E-\u001F\u007F\u008F\u009F]/gm,"")).replace(/[\u0009\u000A\u000C\u000D\u1680\u200B\u200C\u200E\u200F\u2028\u2029\u2581\uFEFF\uFFFD]/gm," ")).includes("~")){const t=e.split("~");e=t.map((e=>e.normalize("NFKC"))).join("~")}else e=e.normalize("NFKC");return e}}class ie extends O{constructor(e){super(),this.tokenizers=e.pretokenizers.map((e=>O.fromConfig(e)))}pre_tokenize_text(e,t){return this.tokenizers.reduce(((e,s)=>s.pre_tokenize(e,t)),[e])}}class le extends O{constructor(e){super()}pre_tokenize_text(e,t){return e.match(/\w+|[^\w\s]+/g)||[]}}class ce extends O{constructor(e){super()}pre_tokenize_text(e,t){return function(e){return e.match(/\S+/g)||[]}(e)}}class de extends O{constructor(e){super(),this.config=e,this.pattern=d(this.config.pattern),this.content=this.config.content}pre_tokenize_text(e,t){return null===this.pattern?[e]:[e.replaceAll(this.pattern,this.config.content)]}}
// ue: config keys treated as special tokens; he: pads every field of an encoding dict
// to length t on padding_side n; pe: truncates every field to length t.
const ue=["bos_token","eos_token","unk_token","sep_token","pad_token","cls_token","mask_token"];function he(e,t,s,n){for(const r of Object.keys(e)){const a=t-e[r].length,i=s(r),l=new Array(a).fill(i);e[r]="right"===n?(0,o.mergeArrays)(e[r],l):(0,o.mergeArrays)(l,e[r])}}function pe(e,t){for(const s of Object.keys(e))e[s].length=t}
// class _e — exported as PreTrainedTokenizer. NOTE(review): this class CONTINUES past
// the end of this chunk (apply_chat_template is cut off mid-method below) — it is not
// self-contained here. Visible pieces: the ChatML-style _default_chat_template; the
// constructor wiring normalizer/pre_tokenizer/model/post_processor/decoder from the
// tokenizer.json config plus added/special tokens and an added-tokens split regex;
// getToken (resolves possibly-AddedToken config entries); from_pretrained (via helper
// c); _call (batching, padding/truncation, optional int64 Tensor packing);
// _encode_text/_encode_plus; encode/decode/batch_decode; decode_single.
class _e extends o.Callable{return_token_type_ids=!1;_default_chat_template="{% for message in messages %}{{'<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n'}}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% endif %}";constructor(e,t){super(),this._tokenizer_config=t,this.normalizer=F.fromConfig(e.normalizer),this.pre_tokenizer=O.fromConfig(e.pre_tokenizer),this.model=M.fromConfig(e.model,t),this.post_processor=R.fromConfig(e.post_processor),this.decoder=X.fromConfig(e.decoder),this.special_tokens=[],this.all_special_ids=[],this.added_tokens=[];for(const t of e.added_tokens){const e=new g(t);this.added_tokens.push(e),this.model.tokens_to_ids.set(e.content,e.id),this.model.vocab[e.id]=e.content,e.special&&(this.special_tokens.push(e.content),this.all_special_ids.push(e.id))}if(this.additional_special_tokens=t.additional_special_tokens??[],this.special_tokens.push(...this.additional_special_tokens),this.special_tokens=[...new Set(this.special_tokens)],this.decoder&&(this.decoder.added_tokens=this.added_tokens,this.decoder.end_of_word_suffix=this.model.end_of_word_suffix),this.added_tokens_regex=this.added_tokens.length>0?new RegExp(this.added_tokens.map((e=>`${e.lstrip?"\\s*":""}(${(0,o.escapeRegExp)(e.content)})${e.rstrip?"\\s*":""}`)).join("|")):null,this.mask_token=this.getToken("mask_token"),this.mask_token_id=this.model.tokens_to_ids.get(this.mask_token),this.pad_token=this.getToken("pad_token","eos_token"),this.pad_token_id=this.model.tokens_to_ids.get(this.pad_token),this.sep_token=this.getToken("sep_token"),this.sep_token_id=this.model.tokens_to_ids.get(this.sep_token),this.unk_token=this.getToken("unk_token"),this.unk_token_id=this.model.tokens_to_ids.get(this.unk_token),this.model_max_length=t.model_max_length,this.remove_space=t.remove_space,this.clean_up_tokenization_spaces=t.clean_up_tokenization_spaces??!0,this.do_lowercase_and_remove_accent=t.do_lowercase_and_remove_accent??!1,this.padding_side="right",this.legacy=!1,this.chat_template=t.chat_template??null,Array.isArray(this.chat_template)){const e=Object.create(null);for(const{name:t,template:s}of this.chat_template){if("string"!=typeof t||"string"!=typeof s)throw new Error('Chat template must be a list of objects with "name" and "template" properties');e[t]=s}this.chat_template=e}this._compiled_template_cache=new Map}getToken(...e){for(const t of e){const e=this._tokenizer_config[t];if(e){if("object"==typeof e){if("AddedToken"===e.__type)return e.content;throw Error(`Unknown token: ${e}`)}return e}}return null}static async from_pretrained(e,{progress_callback:t=null,config:s=null,cache_dir:o=null,local_files_only:n=!1,revision:r="main",legacy:a=null}={}){return new this(...await c(e,{progress_callback:t,config:s,cache_dir:o,local_files_only:n,revision:r,legacy:a}))}_call(e,{text_pair:t=null,add_special_tokens:s=!0,padding:o=!1,truncation:n=null,max_length:i=null,return_tensor:l=!0}={}){const c=Array.isArray(e);let d;if(c){if(0===e.length)throw Error("text array must be non-empty");if(null!==t){if(!Array.isArray(t))throw Error("text_pair must also be an array");if(e.length!==t.length)throw Error("text and text_pair must have the same length");d=e.map(((e,o)=>this._encode_plus(e,t[o],{add_special_tokens:s})))}else d=e.map((e=>this._encode_plus(e,null,{add_special_tokens:s})))}else{if(null===e)throw Error("text may not be null");if(Array.isArray(t))throw Error("When specifying `text_pair`, since `text` is a string, `text_pair` must also be a string (i.e., not an array).");d=[this._encode_plus(e,t,{add_special_tokens:s})]}if(null===i?i="max_length"===o?this.model_max_length:(0,r.max)(d.map((e=>e.input_ids.length)))[0]:n||console.warn("Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=true` to explicitly truncate examples to max length."),i=Math.min(i,this.model_max_length),o||n)for(let e=0;e<d.length;++e)d[e].input_ids.length!==i&&(d[e].input_ids.length>i?n&&pe(d[e],i):o&&he(d[e],i,(e=>"input_ids"===e?this.pad_token_id:0),this.padding_side));const u={};if(l){if((!o||!n)&&d.some((e=>{for(const t of Object.keys(e))if(e[t].length!==d[0][t]?.length)return!0;return!1})))throw Error("Unable to create tensor, you should probably activate truncation and/or padding with 'padding=true' and 'truncation=true' to have batched tensors with the same length.");const e=[d.length,d[0].input_ids.length];for(const t of Object.keys(d[0]))u[t]=new a.Tensor("int64",BigInt64Array.from(d.flatMap((e=>e[t])).map(BigInt)),e)}else{for(const e of Object.keys(d[0]))u[e]=d.map((t=>t[e]));if(!c)for(const e of Object.keys(u))u[e]=u[e][0]}return u}_encode_text(e){if(null===e)return null;const t=(this.added_tokens_regex?e.split(this.added_tokens_regex).filter((e=>e)):[e]).map(((e,t)=>{if(void 0!==this.added_tokens.find((t=>t.content===e)))return e;{if(!0===this.remove_space&&(e=e.trim().split(/\s+/).join(" ")),this.do_lowercase_and_remove_accent&&(e=function(e){return _(e.toLowerCase())}(e)),null!==this.normalizer&&(e=this.normalizer(e)),0===e.length)return[];const s=null!==this.pre_tokenizer?this.pre_tokenizer(e,{section_index:t}):[e];return this.model(s)}})).flat();return t}_encode_plus(e,t=null,{add_special_tokens:s=!0}={}){const n=this._encode_text(e),r=this._encode_text(t),a=this.post_processor?this.post_processor(n,r,{add_special_tokens:s}):{tokens:(0,o.mergeArrays)(n??[],r??[])},i=this.model.convert_tokens_to_ids(a.tokens),l={input_ids:i,attention_mask:new Array(i.length).fill(1)};return this.return_token_type_ids&&a.token_type_ids&&(l.token_type_ids=a.token_type_ids),l}encode(e,t=null,{add_special_tokens:s=!0}={}){const{input_ids:o}=this._encode_plus(e,t,{add_special_tokens:s});return o}batch_decode(e,t={}){return e instanceof a.Tensor&&(e=e.tolist()),e.map((e=>this.decode(e,t)))}decode(e,t={}){if(e instanceof a.Tensor&&(e=h(e)),!Array.isArray(e)||0===e.length||!(0,o.isIntegralNumber)(e[0]))throw Error("token_ids must be a non-empty array of integers.");return this.decode_single(e,t)}decode_single(e,{skip_special_tokens:t=!1,clean_up_tokenization_spaces:s=null}){let o=this.model.convert_ids_to_tokens(e);t&&(o=o.filter((e=>!this.special_tokens.includes(e))));let 
n=this.decoder?this.decoder(o):o.join(" ");return this.decoder&&this.decoder.end_of_word_suffix&&(n=n.replaceAll(this.decoder.end_of_word_suffix," "),t&&(n=n.trim())),(s??this.clean_up_tokenization_spaces)&&(n=p(n)),n}get default_chat_template(){return this._warned_about_chat_template||(console.warn("No chat template is defined for this tokenizer - using a default chat template that implements the ChatML format. If the default is not appropriate for your model, please set `tokenizer.chat_template` to an appropriate template. See https://huggingface.co/docs/transformers/main/chat_templating for more information."),this._warned_about_chat_template=!0),this._default_chat_template}apply_chat_template(e,{chat_template:t=null,add_generation_prompt:s=!1,tokenize:o=!0,padding:n=!1,truncation:r=!1,max_length:a=null,return_tensor:i=!0,tokenizer_kwargs:c={},...d}={}){if(this.chat_template&&"object"==typeof this.chat_template||null===this.chat_template&&this.default_chat_template&&"object"==typeof this.default_chat_template){const e=this.chat_template??this.default_chat_template;if(null!==t&&Object.hasOwn(e,t))t=e[t];else if(null===t&&"default"in e)t=e.default;else if(null===t)throw Error(`This model has multiple chat templates with no default specified! Please either pass a chat template or the name of the template you wish to use to the 'chat_template' argument. 
Available template names are ${Object.keys(e).sort()}.`)}else t??=this.chat_template??this.default_chat_template;if("string"!=typeof t)throw Error("chat_template must be a string, but got "+typeof t);let u=this._compiled_template_cache.get(t);void 0===u&&(u=new l.Template(t),this._compiled_template_cache.set(t,u));const h=Object.create(null);for(const e of ue){const t=this.getToken(e);t&&(h[e]=t)}const p=u.render({messages:e,add_generation_prompt:s,...h,...d});return o?this._call(p,{add_special_tokens:!1,padding:n,truncation:r,max_length:a,return_tensor:i,...c}).input_ids:p}}class me extends _e{return_token_type_ids=!0}class fe extends _e{return_token_type_ids=!0}class ge extends _e{return_token_type_ids=!0}class Me extends _e{return_token_type_ids=!0}class we extends _e{return_token_type_ids=!0}class Te extends _e{return_token_type_ids=!0}class ke extends _e{return_token_type_ids=!0}class be extends _e{return_token_type_ids=!0}class xe extends _e{return_token_type_ids=!0}class ye extends _e{}class Fe extends _e{}class Ce extends _e{return_token_type_ids=!0;constructor(e,t){super(e,t),console.warn('WARNING: `XLMTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. 
Therefore, you may experience slightly inaccurate results.')}}class Pe extends _e{return_token_type_ids=!0}class ve extends _e{}class Se extends _e{_default_chat_template='{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}'}class Ae extends _e{}class Le extends _e{constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{2}_[A-Z]{2}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}_build_translation_inputs(e,t,s){return Ue(this,e,t,s)}}class Ee extends Le{}class ze extends _e{}class Be extends Se{constructor(e,t){const s=".,!?…。,、।۔،",o=e.pre_tokenizer?.pretokenizers[0]?.pattern;o&&o.Regex===` ?[^(\\s|[${s}])]+`&&(o.Regex=` ?[^\\s${s}]+`),super(e,t)}}const Ie="▁";class Oe extends _e{_default_chat_template="{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% elif USE_DEFAULT_PROMPT == true and not '<<SYS>>' in messages[0]['content'] %}{% set loop_messages = messages %}{% set system_message = 'DEFAULT_SYSTEM_MESSAGE' %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'system' %}{{ '<<SYS>>\n' + content.strip() + '\n<</SYS>>\n\n' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}";DEFAULT_SYSTEM_PROMPT="You are a helpful, respectful and honest assistant. 
Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.\n\nIf a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.";constructor(e,t){super(e,t),this.use_default_system_prompt=t.use_default_system_prompt??!1,this.legacy=t.legacy??!0,this.legacy||(this.normalizer=null,this.pre_tokenizer=new ne({replacement:Ie,add_prefix_space:!0,prepend_scheme:"first"}))}_encode_text(e){if(null===e)return null;if(this.legacy||0===e.length)return super._encode_text(e);let t=super._encode_text(Ie+e.replaceAll(Ie," "));return t.length>1&&t[0]===Ie&&this.special_tokens.includes(t[1])&&(t=t.slice(1)),t}get default_chat_template(){return super.default_chat_template.replaceAll("USE_DEFAULT_PROMPT",this.use_default_system_prompt?"true":"false").replaceAll("DEFAULT_SYSTEM_MESSAGE",this.DEFAULT_SYSTEM_PROMPT.replaceAll("\n","\\n").replaceAll("'","\\'"))}}class De extends Oe{}class Ne extends _e{}class Ve extends _e{}class qe extends _e{}class je extends _e{}class Re extends _e{}class Ge extends _e{}class We extends _e{_default_chat_template="{% if messages[0]['role'] == 'system' %}{{ raise_exception('System role not supported') }}{% endif %}{% for message in messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if (message['role'] == 'assistant') %}{% set role = 'model' %}{% else %}{% set role = message['role'] %}{% endif %}{{ '<start_of_turn>' + role + '\n' + message['content'] | trim + '<end_of_turn>\n' }}{% endfor %}{% if add_generation_prompt %}{{'<start_of_turn>model\n'}}{% endif %}"}class $e extends _e{}function 
/* Shared implementation of `_build_translation_inputs` used by the
   translation tokenizers (MBart `Le`, MBart50 `Ee`, NLLB `Xe`,
   M2M100 `Qe`).  Validates the tokenizer's language metadata, rewrites
   the source-language special token inside the post-processor template,
   forces the target-language token as the generation BOS, and finally
   tokenizes via `e._call(t,s)`.
   e: tokenizer instance; t: text input; s: tokenizer options;
   o: generation kwargs carrying src_lang / tgt_lang (mutated: gains
   forced_bos_token_id). */
Ue(e,t,s,o){
/* The concrete subclass must provide the three translation attributes. */
if(!("language_codes"in e)||!Array.isArray(e.language_codes))throw new Error("Tokenizer must have `language_codes` attribute set and it should be an array of language ids.");
if(!("languageRegex"in e&&e.languageRegex instanceof RegExp))throw new Error("Tokenizer must have `languageRegex` attribute set and it should be a regular expression.");
if(!("lang_to_token"in e)||"function"!=typeof e.lang_to_token)throw new Error("Tokenizer must have `lang_to_token` attribute set and it should be a function.");
const n=o.src_lang,r=o.tgt_lang;
/* Target language is mandatory... */
if(!e.language_codes.includes(r))throw new Error(`Target language code "${r}" is not valid. Must be one of: {${e.language_codes.join(", ")}}`);
/* ...source language is optional. */
if(void 0!==n){
if(!e.language_codes.includes(n))throw new Error(`Source language code "${n}" is not valid. Must be one of: {${e.language_codes.join(", ")}}`);
/* Replace the first language-looking SpecialToken in the post-processor's
   "single" template with the source-language token.
   NOTE(review): this mutates e.post_processor.config in place, so the
   patch persists across calls - confirm against upstream transformers.js. */
for(const t of e.post_processor.config.single)if("SpecialToken"in t&&e.languageRegex.test(t.SpecialToken.id)){t.SpecialToken.id=e.lang_to_token(n);break}
}
/* Make generation start with the target-language token, then tokenize. */
return o.forced_bos_token_id=e.model.convert_tokens_to_ids([e.lang_to_token(r)])[0],e._call(t,s)
}
/* NllbTokenizer: language special tokens look like "eng_Latn". */
class Xe extends _e{
constructor(e,t){super(e,t),this.languageRegex=/^[a-z]{3}_[A-Z][a-z]{3}$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))),this.lang_to_token=e=>e}
_build_translation_inputs(e,t,s){return Ue(this,e,t,s)}
}
/* M2M100Tokenizer: language special tokens look like "__en__";
   language_codes are stored without the underscores and re-wrapped
   by lang_to_token. */
class Qe extends _e{
constructor(e,t){super(e,t),this.languageRegex=/^__[a-z]{2,3}__$/,this.language_codes=this.special_tokens.filter((e=>this.languageRegex.test(e))).map((e=>e.slice(2,-2))),this.lang_to_token=e=>`__${e}__`}
_build_translation_inputs(e,t,s){return Ue(this,e,t,s)}
}
const
/* Whisper language tables (mirrors the LANGUAGES table of the
   openai/whisper tokenizer):
   He: [ISO-639 code, english name] pairs;
   Ye: code -> name lookup (used by WhisperTokenizer._decode_asr);
   Je: name -> code lookup, extended with alternative/legacy language
   names (e.g. "burmese" -> "my", "castilian" -> "es"). */
He=[["en","english"],["zh","chinese"],["de","german"],["es","spanish"],["ru","russian"],["ko","korean"],
["fr","french"],["ja","japanese"],["pt","portuguese"],["tr","turkish"],["pl","polish"],["ca","catalan"],
["nl","dutch"],["ar","arabic"],["sv","swedish"],["it","italian"],["id","indonesian"],["hi","hindi"],
["fi","finnish"],["vi","vietnamese"],["he","hebrew"],["uk","ukrainian"],["el","greek"],["ms","malay"],
["cs","czech"],["ro","romanian"],["da","danish"],["hu","hungarian"],["ta","tamil"],["no","norwegian"],
["th","thai"],["ur","urdu"],["hr","croatian"],["bg","bulgarian"],["lt","lithuanian"],["la","latin"],
["mi","maori"],["ml","malayalam"],["cy","welsh"],["sk","slovak"],["te","telugu"],["fa","persian"],
["lv","latvian"],["bn","bengali"],["sr","serbian"],["az","azerbaijani"],["sl","slovenian"],["kn","kannada"],
["et","estonian"],["mk","macedonian"],["br","breton"],["eu","basque"],["is","icelandic"],["hy","armenian"],
["ne","nepali"],["mn","mongolian"],["bs","bosnian"],["kk","kazakh"],["sq","albanian"],["sw","swahili"],
["gl","galician"],["mr","marathi"],["pa","punjabi"],["si","sinhala"],["km","khmer"],["sn","shona"],
["yo","yoruba"],["so","somali"],["af","afrikaans"],["oc","occitan"],["ka","georgian"],["be","belarusian"],
["tg","tajik"],["sd","sindhi"],["gu","gujarati"],["am","amharic"],["yi","yiddish"],["lo","lao"],
["uz","uzbek"],["fo","faroese"],["ht","haitian creole"],["ps","pashto"],["tk","turkmen"],["nn","nynorsk"],
["mt","maltese"],["sa","sanskrit"],["lb","luxembourgish"],["my","myanmar"],["bo","tibetan"],["tl","tagalog"],
["mg","malagasy"],["as","assamese"],["tt","tatar"],["haw","hawaiian"],["ln","lingala"],["ha","hausa"],
["ba","bashkir"],["jw","javanese"],["su","sundanese"]],
/* code -> name */
Ye=new Map(He),
/* name -> code, inverted from He, plus alternative spellings. */
Je=new Map([...He.map((([e,t])=>[t,e])),["burmese","my"],["valencian","ca"],["flemish","nl"],["haitian","ht"],["letzeburgesch","lb"],["pushto","ps"],["panjabi","pa"],["moldavian","ro"],["moldovan","ro"],["sinhalese","si"],["castilian","es"]]);
class Ze extends
_e{_default_chat_template='{% for message in messages %}" "{{ message.content }}{{ eos_token }}" "{% endfor %}';_decode_asr(e,{return_timestamps:t=!1,return_language:s=!1,time_precision:o=null,force_full_sequences:n=!0}={}){if(null===o)throw Error("Must specify time_precision");let a=null;const i="word"===t;function l(){return{language:a,timestamp:[null,null],text:""}}const c=[];let d=l(),u=0;const h=this.model.convert_tokens_to_ids(["<|notimestamps|>"])[0]+1;let p=[],_=[],m=!1,f=null;const g=new Set(this.all_special_ids);for(const s of e){const e=s.tokens,n=i?s.token_timestamps:null;let M=null,w=h;if("stride"in s){const[t,n,r]=s.stride;if(u-=n,f=t-r,n&&(w=n/o+h),r)for(let t=e.length-1;t>=0;--t){const s=e[t];if(s>=h){if(null!==M&&(s-h)*o<f)break;M=s}}}let T=[],k=[];for(let s=0;s<e.length;++s){const f=e[s];if(g.has(f)){const e=this.decode([f]),s=Ye.get(e.slice(2,-2));if(void 0!==s){if(null!==a&&s!==a&&!t){p.push(T);const e=this.findLongestCommonSequence(p)[0],t=this.decode(e);d.text=t,c.push(d),p=[],T=[],d=l()}a=d.language=s}}else if(f>=h){const e=(f-h)*o+u,t=(0,r.round)(e,2);if(null!==M&&f>=M)m=!0;else if(m||p.length>0&&f<w)m=!1;else if(null===d.timestamp[0])d.timestamp[0]=t;else if(t===d.timestamp[0]);else{d.timestamp[1]=t,p.push(T),i&&_.push(k);const[e,s]=this.findLongestCommonSequence(p,_),o=this.decode(e);d.text=o,i&&(d.words=this.collateWordTimestamps(e,s,a)),c.push(d),p=[],T=[],_=[],k=[],d=l()}}else if(T.push(f),i){let e,t=(0,r.round)(n[s]+u,2);e=s+1<n.length?(0,r.round)(n[s+1]+u,2):null,k.push([t,e])}}if("stride"in s){const[e,t,o]=s.stride;u+=e-o}T.length>0?(p.push(T),i&&_.push(k)):p.every((e=>0===e.length))&&(d=l(),p=[],T=[],_=[],k=[])}if(p.length>0){if(n&&t)throw new Error("Whisper did not predict an ending timestamp, which can happen if audio is cut off in the middle of a word. 
Also make sure WhisperTimeStampLogitsProcessor was used during generation.");const[e,s]=this.findLongestCommonSequence(p,_),o=this.decode(e);d.text=o,i&&(d.words=this.collateWordTimestamps(e,s,a)),c.push(d)}let M=Object.create(null);const w=c.map((e=>e.text)).join("");if(t||s){for(let e=0;e<c.length;++e){const o=c[e];t||delete o.timestamp,s||delete o.language}if(i){const e=[];for(const t of c)for(const s of t.words)e.push(s);M={chunks:e}}else M={chunks:c}}return[w,M]}findLongestCommonSequence(e,t=null){let s=e[0],o=s.length,n=[];const r=Array.isArray(t)&&t.length>0;let a=r?[]:null,i=r?t[0]:null;for(let l=1;l<e.length;++l){const c=e[l];let d=0,u=[o,o,0,0];const h=c.length;for(let e=1;e<o+h;++e){const t=e/1e4,n=Math.max(0,o-e),r=Math.min(o,o+h-e),a=s.slice(n,r),i=Math.max(0,e-o),l=Math.min(h,e),p=c.slice(i,l);if(a.length!==p.length)throw new Error("There is a bug within whisper `decode_asr` function, please report it. Dropping to prevent bad inference.");const _=a.filter(((e,t)=>e===p[t])).length,m=_/e+t;_>1&&m>d&&(d=m,u=[n,r,i,l])}const[p,_,m,f]=u,g=Math.floor((_+p)/2),M=Math.floor((f+m)/2);n.push(...s.slice(0,g)),s=c.slice(M),o=s.length,r&&(a.push(...i.slice(0,g)),i=t[l].slice(M))}return n.push(...s),r?(a.push(...i),[n,a]):[n,[]]}collateWordTimestamps(e,t,s){const[o,n,r]=this.combineTokensIntoWords(e,s),a=[];for(let e=0;e<o.length;++e){const s=r[e];a.push({text:o[e],timestamp:[t[s.at(0)][0],t[s.at(-1)][1]]})}return a}combineTokensIntoWords(e,t,s="\"'“¡¿([{-",o="\"'.。,,!!??::”)]}、"){let n,r,a;return["chinese","japanese","thai","lao","myanmar"].includes(t=t??"english")?[n,r,a]=this.splitTokensOnUnicode(e):[n,r,a]=this.splitTokensOnSpaces(e),this.mergePunctuations(n,r,a,s,o)}decode(e,t){let s;return t&&t.decode_with_timestamps?(e instanceof a.Tensor&&(e=h(e)),s=this.decodeWithTimestamps(e,t)):s=super.decode(e,t),s}decodeWithTimestamps(e,t){const s=t?.time_precision??.02,o=Array.from(this.all_special_ids).at(-1)+1;let n=[[]];for(const t of e)if(t>=o){const 
e=(0,r.round)((t-o)*s,2);n.push(`<|${e}|>`),n.push([])}else n[n.length-1].push(t);return n=n.map((e=>"string"==typeof e?e:super.decode(e,t))),n.join("")}splitTokensOnUnicode(e){const t=this.decode(e,{decode_with_timestamps:!0}),s=[],o=[],n=[];let r=[],a=[],i=0;for(let l=0;l<e.length;++l){const c=e[l];r.push(c),a.push(l);const d=this.decode(r,{decode_with_timestamps:!0});d.includes("�")&&"�"!==t[i+d.indexOf("�")]||(s.push(d),o.push(r),n.push(a),r=[],a=[],i+=d.length)}return[s,o,n]}splitTokensOnSpaces(e){const[t,s,o]=this.splitTokensOnUnicode(e),n=[],r=[],a=[],i=new RegExp(`^[${m}]$`,"gu");for(let e=0;e<t.length;++e){const l=t[e],c=s[e],d=o[e],u=c[0]>=this.model.tokens_to_ids.get("<|endoftext|>"),h=l.startsWith(" "),p=l.trim(),_=i.test(p);if(u||h||_||0===n.length)n.push(l),r.push(c),a.push(d);else{const e=n.length-1;n[e]+=l,r[e].push(...c),a[e].push(...d)}}return[n,r,a]}mergePunctuations(e,t,s,n,r){const a=structuredClone(e),i=structuredClone(t),l=structuredClone(s);let c=a.length-2,d=a.length-1;for(;c>=0;)a[c].startsWith(" ")&&n.includes(a[c].trim())?(a[d]=a[c]+a[d],i[d]=(0,o.mergeArrays)(i[c],i[d]),l[d]=(0,o.mergeArrays)(l[c],l[d]),a[c]="",i[c]=[],l[c]=[]):d=c,--c;for(c=0,d=1;d<a.length;)!a[c].endsWith(" ")&&r.includes(a[d])?(a[c]+=a[d],i[c]=(0,o.mergeArrays)(i[c],i[d]),l[c]=(0,o.mergeArrays)(l[c],l[d]),a[d]="",i[d]=[],l[d]=[]):c=d,++d;return[a.filter((e=>e)),i.filter((e=>e.length>0)),l.filter((e=>e.length>0))]}get_decoder_prompt_ids({language:e=null,task:t=null,no_timestamps:s=!0}={}){const o=[];if(e){e=e.toLowerCase();let t=Je.get(e);if(void 0===t){if(!Ye.has(e)){const t=2===e.length?Ye.keys():Ye.values();throw new Error(`Language "${e}" is not supported. Must be one of: ${JSON.stringify(t)}`)}t=e}const s=this.model.tokens_to_ids.get(`<|${t}|>`);if(void 0===s)throw new Error(`Unable to find language "${t}" in model vocabulary. 
Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`);o.push(s)}else o.push(null);if(t){if("transcribe"!==(t=t.toLowerCase())&&"translate"!==t)throw new Error(`Task "${t}" is not supported. Must be one of: ["transcribe", "translate"]`);const e=this.model.tokens_to_ids.get(`<|${t}|>`);if(void 0===e)throw new Error(`Unable to find task "${t}" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.`);o.push(e)}else o.push(null);if(s){const e=this.model.tokens_to_ids.get("<|notimestamps|>");if(void 0===e)throw new Error('Unable to find "<|notimestamps|>" in model vocabulary. Please report this issue at https://github.com/xenova/transformers.js/issues/new/choose.');o.push(e)}return o.map(((e,t)=>[t+1,e])).filter((e=>null!==e[1]))}}class Ke extends _e{}class et extends _e{}class tt extends _e{}class st extends _e{constructor(e,t){super(e,t),this.languageRegex=/^(>>\w+<<)\s*/g,this.supported_language_codes=this.model.vocab.filter((e=>this.languageRegex.test(e))),console.warn('WARNING: `MarianTokenizer` is not yet supported by Hugging Face\'s "fast" tokenizers library. Therefore, you may experience slightly inaccurate results.')}_encode_text(e){if(null===e)return null;const[t,...s]=e.trim().split(this.languageRegex);if(0===s.length)return super._encode_text(t);if(2===s.length){const[e,t]=s;return this.supported_language_codes.includes(e)||console.warn(`Unsupported language code "${e}" detected, which may lead to unexpected behavior. 
Should be one of: ${JSON.stringify(this.supported_language_codes)}`),(0,o.mergeArrays)([e],super._encode_text(t))}}}class ot extends _e{}class nt extends _e{_default_chat_template="{% for message in messages %}{% if message['role'] == 'user' %}{{ ' ' }}{% endif %}{{ message['content'] }}{% if not loop.last %}{{ ' ' }}{% endif %}{% endfor %}{{ eos_token }}"}class rt extends nt{}class at extends _e{}class it extends _e{}class lt extends _e{constructor(e,t){super(e,t),this.decoder=new oe({})}}class ct extends _e{}class dt{static TOKENIZER_CLASS_MAPPING={T5Tokenizer:ve,DistilBertTokenizer:ye,CamembertTokenizer:Fe,DebertaTokenizer:we,DebertaV2Tokenizer:Te,BertTokenizer:me,HerbertTokenizer:ke,ConvBertTokenizer:be,RoFormerTokenizer:xe,XLMTokenizer:Ce,ElectraTokenizer:Pe,MobileBertTokenizer:ge,SqueezeBertTokenizer:Me,AlbertTokenizer:fe,GPT2Tokenizer:Se,BartTokenizer:Ae,MBartTokenizer:Le,MBart50Tokenizer:Ee,RobertaTokenizer:ze,WhisperTokenizer:Ze,CodeGenTokenizer:Ke,CLIPTokenizer:et,SiglipTokenizer:tt,MarianTokenizer:st,BloomTokenizer:Be,NllbTokenizer:Xe,M2M100Tokenizer:Qe,LlamaTokenizer:Oe,CodeLlamaTokenizer:De,XLMRobertaTokenizer:Ne,MPNetTokenizer:Ve,FalconTokenizer:qe,GPTNeoXTokenizer:je,EsmTokenizer:Re,Wav2Vec2CTCTokenizer:ot,BlenderbotTokenizer:nt,BlenderbotSmallTokenizer:rt,SpeechT5Tokenizer:at,NougatTokenizer:it,VitsTokenizer:lt,Qwen2Tokenizer:Ge,GemmaTokenizer:We,Grok1Tokenizer:$e,CohereTokenizer:ct,PreTrainedTokenizer:_e};static async from_pretrained(e,{quantized:t=!0,progress_callback:s=null,config:o=null,cache_dir:n=null,local_files_only:r=!1,revision:a="main",legacy:i=null}={}){const[l,d]=await c(e,{quantized:t,progress_callback:s,config:o,cache_dir:n,local_files_only:r,revision:a,legacy:i}),u=d.tokenizer_class?.replace(/Fast$/,"")??"PreTrainedTokenizer";let h=this.TOKENIZER_CLASS_MAPPING[u];return h||(console.warn(`Unknown tokenizer class "${u}", attempting to construct from base class.`),h=_e),new h(l,d)}}},"./src/transformers.js":
/*!*****************************!*\
!*** ./src/transformers.js ***!
\*****************************/(e,t,s)=>{s.r(t),s.d(t,{ASTFeatureExtractor:()=>i.ASTFeatureExtractor,ASTForAudioClassification:()=>r.ASTForAudioClassification,ASTModel:()=>r.ASTModel,ASTPreTrainedModel:()=>r.ASTPreTrainedModel,AlbertForMaskedLM:()=>r.AlbertForMaskedLM,AlbertForQuestionAnswering:()=>r.AlbertForQuestionAnswering,AlbertForSequenceClassification:()=>r.AlbertForSequenceClassification,AlbertModel:()=>r.AlbertModel,AlbertPreTrainedModel:()=>r.AlbertPreTrainedModel,AlbertTokenizer:()=>a.AlbertTokenizer,AudioClassificationPipeline:()=>o.AudioClassificationPipeline,AutoConfig:()=>l.AutoConfig,AutoModel:()=>r.AutoModel,AutoModelForAudioClassification:()=>r.AutoModelForAudioClassification,AutoModelForAudioFrameClassification:()=>r.AutoModelForAudioFrameClassification,AutoModelForCTC:()=>r.AutoModelForCTC,AutoModelForCausalLM:()=>r.AutoModelForCausalLM,AutoModelForDepthEstimation:()=>r.AutoModelForDepthEstimation,AutoModelForDocumentQuestionAnswering:()=>r.AutoModelForDocumentQuestionAnswering,AutoModelForImageClassification:()=>r.AutoModelForImageClassification,AutoModelForImageFeatureExtraction:()=>r.AutoModelForImageFeatureExtraction,AutoModelForImageMatting:()=>r.AutoModelForImageMatting,AutoModelForImageSegmentation:()=>r.AutoModelForImageSegmentation,AutoModelForImageToImage:()=>r.AutoModelForImageToImage,AutoModelForMaskGeneration:()=>r.AutoModelForMaskGeneration,AutoModelForMaskedLM:()=>r.AutoModelForMaskedLM,AutoModelForObjectDetection:()=>r.AutoModelForObjectDetection,AutoModelForQuestionAnswering:()=>r.AutoModelForQuestionAnswering,AutoModelForSemanticSegmentation:()=>r.AutoModelForSemanticSegmentation,AutoModelForSeq2SeqLM:()=>r.AutoModelForSeq2SeqLM,AutoModelForSequenceClassification:()=>r.AutoModelForSequenceClassification,AutoModelForSpeechSeq2Seq:()=>r.AutoModelForSpeechSeq2Seq,AutoModelForTextToSpectrogram:()=>r.AutoModelForTextToSpectrogram,AutoModelForTextToWaveform:()=>r.AutoModelForTextToWaveform,AutoModelForTokenClassification:()=>r.AutoMod
elForTokenClassification,AutoModelForVision2Seq:()=>r.AutoModelForVision2Seq,AutoModelForXVector:()=>r.AutoModelForXVector,AutoModelForZeroShotObjectDetection:()=>r.AutoModelForZeroShotObjectDetection,AutoProcessor:()=>i.AutoProcessor,AutoTokenizer:()=>a.AutoTokenizer,AutomaticSpeechRecognitionPipeline:()=>o.AutomaticSpeechRecognitionPipeline,BartForConditionalGeneration:()=>r.BartForConditionalGeneration,BartForSequenceClassification:()=>r.BartForSequenceClassification,BartModel:()=>r.BartModel,BartPretrainedModel:()=>r.BartPretrainedModel,BartTokenizer:()=>a.BartTokenizer,BaseModelOutput:()=>r.BaseModelOutput,BeitFeatureExtractor:()=>i.BeitFeatureExtractor,BeitForImageClassification:()=>r.BeitForImageClassification,BeitModel:()=>r.BeitModel,BeitPreTrainedModel:()=>r.BeitPreTrainedModel,BertForMaskedLM:()=>r.BertForMaskedLM,BertForQuestionAnswering:()=>r.BertForQuestionAnswering,BertForSequenceClassification:()=>r.BertForSequenceClassification,BertForTokenClassification:()=>r.BertForTokenClassification,BertModel:()=>r.BertModel,BertPreTrainedModel:()=>r.BertPreTrainedModel,BertTokenizer:()=>a.BertTokenizer,BitImageProcessor:()=>i.BitImageProcessor,BlenderbotForConditionalGeneration:()=>r.BlenderbotForConditionalGeneration,BlenderbotModel:()=>r.BlenderbotModel,BlenderbotPreTrainedModel:()=>r.BlenderbotPreTrainedModel,BlenderbotSmallForConditionalGeneration:()=>r.BlenderbotSmallForConditionalGeneration,BlenderbotSmallModel:()=>r.BlenderbotSmallModel,BlenderbotSmallPreTrainedModel:()=>r.BlenderbotSmallPreTrainedModel,BlenderbotSmallTokenizer:()=>a.BlenderbotSmallTokenizer,BlenderbotTokenizer:()=>a.BlenderbotTokenizer,BloomForCausalLM:()=>r.BloomForCausalLM,BloomModel:()=>r.BloomModel,BloomPreTrainedModel:()=>r.BloomPreTrainedModel,BloomTokenizer:()=>a.BloomTokenizer,CLIPFeatureExtractor:()=>i.CLIPFeatureExtractor,CLIPModel:()=>r.CLIPModel,CLIPPreTrainedModel:()=>r.CLIPPreTrainedModel,CLIPSegForImageSegmentation:()=>r.CLIPSegForImageSegmentation,CLIPSegModel:()=>r.CLIP
SegModel,CLIPSegPreTrainedModel:()=>r.CLIPSegPreTrainedModel,CLIPTextModelWithProjection:()=>r.CLIPTextModelWithProjection,CLIPTokenizer:()=>a.CLIPTokenizer,CLIPVisionModelWithProjection:()=>r.CLIPVisionModelWithProjection,CamembertForMaskedLM:()=>r.CamembertForMaskedLM,CamembertForQuestionAnswering:()=>r.CamembertForQuestionAnswering,CamembertForSequenceClassification:()=>r.CamembertForSequenceClassification,CamembertForTokenClassification:()=>r.CamembertForTokenClassification,CamembertModel:()=>r.CamembertModel,CamembertPreTrainedModel:()=>r.CamembertPreTrainedModel,CamembertTokenizer:()=>a.CamembertTokenizer,CausalLMOutput:()=>r.CausalLMOutput,CausalLMOutputWithPast:()=>r.CausalLMOutputWithPast,ChineseCLIPFeatureExtractor:()=>i.ChineseCLIPFeatureExtractor,ChineseCLIPModel:()=>r.ChineseCLIPModel,ChineseCLIPPreTrainedModel:()=>r.ChineseCLIPPreTrainedModel,ClapAudioModelWithProjection:()=>r.ClapAudioModelWithProjection,ClapFeatureExtractor:()=>i.ClapFeatureExtractor,ClapModel:()=>r.ClapModel,ClapPreTrainedModel:()=>r.ClapPreTrainedModel,ClapTextModelWithProjection:()=>r.ClapTextModelWithProjection,CodeGenForCausalLM:()=>r.CodeGenForCausalLM,CodeGenModel:()=>r.CodeGenModel,CodeGenPreTrainedModel:()=>r.CodeGenPreTrainedModel,CodeGenTokenizer:()=>a.CodeGenTokenizer,CodeLlamaTokenizer:()=>a.CodeLlamaTokenizer,CohereTokenizer:()=>a.CohereTokenizer,ConvBertForMaskedLM:()=>r.ConvBertForMaskedLM,ConvBertForQuestionAnswering:()=>r.ConvBertForQuestionAnswering,ConvBertForSequenceClassification:()=>r.ConvBertForSequenceClassification,ConvBertForTokenClassification:()=>r.ConvBertForTokenClassification,ConvBertModel:()=>r.ConvBertModel,ConvBertPreTrainedModel:()=>r.ConvBertPreTrainedModel,ConvBertTokenizer:()=>a.ConvBertTokenizer,ConvNextFeatureExtractor:()=>i.ConvNextFeatureExtractor,ConvNextForImageClassification:()=>r.ConvNextForImageClassification,ConvNextImageProcessor:()=>i.ConvNextImageProcessor,ConvNextModel:()=>r.ConvNextModel,ConvNextPreTrainedModel:()=>r.ConvNextPreTr
ainedModel,ConvNextV2ForImageClassification:()=>r.ConvNextV2ForImageClassification,ConvNextV2Model:()=>r.ConvNextV2Model,ConvNextV2PreTrainedModel:()=>r.ConvNextV2PreTrainedModel,DPTFeatureExtractor:()=>i.DPTFeatureExtractor,DPTForDepthEstimation:()=>r.DPTForDepthEstimation,DPTImageProcessor:()=>i.DPTImageProcessor,DPTModel:()=>r.DPTModel,DPTPreTrainedModel:()=>r.DPTPreTrainedModel,DebertaForMaskedLM:()=>r.DebertaForMaskedLM,DebertaForQuestionAnswering:()=>r.DebertaForQuestionAnswering,DebertaForSequenceClassification:()=>r.DebertaForSequenceClassification,DebertaForTokenClassification:()=>r.DebertaForTokenClassification,DebertaModel:()=>r.DebertaModel,DebertaPreTrainedModel:()=>r.DebertaPreTrainedModel,DebertaTokenizer:()=>a.DebertaTokenizer,DebertaV2ForMaskedLM:()=>r.DebertaV2ForMaskedLM,DebertaV2ForQuestionAnswering:()=>r.DebertaV2ForQuestionAnswering,DebertaV2ForSequenceClassification:()=>r.DebertaV2ForSequenceClassification,DebertaV2ForTokenClassification:()=>r.DebertaV2ForTokenClassification,DebertaV2Model:()=>r.DebertaV2Model,DebertaV2PreTrainedModel:()=>r.DebertaV2PreTrainedModel,DebertaV2Tokenizer:()=>a.DebertaV2Tokenizer,DeiTFeatureExtractor:()=>i.DeiTFeatureExtractor,DeiTForImageClassification:()=>r.DeiTForImageClassification,DeiTModel:()=>r.DeiTModel,DeiTPreTrainedModel:()=>r.DeiTPreTrainedModel,DepthAnythingForDepthEstimation:()=>r.DepthAnythingForDepthEstimation,DepthAnythingPreTrainedModel:()=>r.DepthAnythingPreTrainedModel,DepthEstimationPipeline:()=>o.DepthEstimationPipeline,DetrFeatureExtractor:()=>i.DetrFeatureExtractor,DetrForObjectDetection:()=>r.DetrForObjectDetection,DetrForSegmentation:()=>r.DetrForSegmentation,DetrModel:()=>r.DetrModel,DetrObjectDetectionOutput:()=>r.DetrObjectDetectionOutput,DetrPreTrainedModel:()=>r.DetrPreTrainedModel,DetrSegmentationOutput:()=>r.DetrSegmentationOutput,Dinov2ForImageClassification:()=>r.Dinov2ForImageClassification,Dinov2Model:()=>r.Dinov2Model,Dinov2PreTrainedModel:()=>r.Dinov2PreTrainedModel,DistilBertF
orMaskedLM:()=>r.DistilBertForMaskedLM,DistilBertForQuestionAnswering:()=>r.DistilBertForQuestionAnswering,DistilBertForSequenceClassification:()=>r.DistilBertForSequenceClassification,DistilBertForTokenClassification:()=>r.DistilBertForTokenClassification,DistilBertModel:()=>r.DistilBertModel,DistilBertPreTrainedModel:()=>r.DistilBertPreTrainedModel,DistilBertTokenizer:()=>a.DistilBertTokenizer,DocumentQuestionAnsweringPipeline:()=>o.DocumentQuestionAnsweringPipeline,DonutFeatureExtractor:()=>i.DonutFeatureExtractor,DonutSwinModel:()=>r.DonutSwinModel,DonutSwinPreTrainedModel:()=>r.DonutSwinPreTrainedModel,EfficientNetForImageClassification:()=>r.EfficientNetForImageClassification,EfficientNetImageProcessor:()=>i.EfficientNetImageProcessor,EfficientNetModel:()=>r.EfficientNetModel,EfficientNetPreTrainedModel:()=>r.EfficientNetPreTrainedModel,ElectraForMaskedLM:()=>r.ElectraForMaskedLM,ElectraForQuestionAnswering:()=>r.ElectraForQuestionAnswering,ElectraForSequenceClassification:()=>r.ElectraForSequenceClassification,ElectraForTokenClassification:()=>r.ElectraForTokenClassification,ElectraModel:()=>r.ElectraModel,ElectraPreTrainedModel:()=>r.ElectraPreTrainedModel,ElectraTokenizer:()=>a.ElectraTokenizer,EsmForMaskedLM:()=>r.EsmForMaskedLM,EsmForSequenceClassification:()=>r.EsmForSequenceClassification,EsmForTokenClassification:()=>r.EsmForTokenClassification,EsmModel:()=>r.EsmModel,EsmPreTrainedModel:()=>r.EsmPreTrainedModel,EsmTokenizer:()=>a.EsmTokenizer,FFT:()=>h.FFT,FalconForCausalLM:()=>r.FalconForCausalLM,FalconModel:()=>r.FalconModel,FalconPreTrainedModel:()=>r.FalconPreTrainedModel,FalconTokenizer:()=>a.FalconTokenizer,FeatureExtractionPipeline:()=>o.FeatureExtractionPipeline,FeatureExtractor:()=>i.FeatureExtractor,FillMaskPipeline:()=>o.FillMaskPipeline,GLPNFeatureExtractor:()=>i.GLPNFeatureExtractor,GLPNForDepthEstimation:()=>r.GLPNForDepthEstimation,GLPNModel:()=>r.GLPNModel,GLPNPreTrainedModel:()=>r.GLPNPreTrainedModel,GPT2LMHeadModel:()=>r.GPT2LMHeadMod
el,GPT2Model:()=>r.GPT2Model,GPT2PreTrainedModel:()=>r.GPT2PreTrainedModel,GPT2Tokenizer:()=>a.GPT2Tokenizer,GPTBigCodeForCausalLM:()=>r.GPTBigCodeForCausalLM,GPTBigCodeModel:()=>r.GPTBigCodeModel,GPTBigCodePreTrainedModel:()=>r.GPTBigCodePreTrainedModel,GPTJForCausalLM:()=>r.GPTJForCausalLM,GPTJModel:()=>r.GPTJModel,GPTJPreTrainedModel:()=>r.GPTJPreTrainedModel,GPTNeoForCausalLM:()=>r.GPTNeoForCausalLM,GPTNeoModel:()=>r.GPTNeoModel,GPTNeoPreTrainedModel:()=>r.GPTNeoPreTrainedModel,GPTNeoXForCausalLM:()=>r.GPTNeoXForCausalLM,GPTNeoXModel:()=>r.GPTNeoXModel,GPTNeoXPreTrainedModel:()=>r.GPTNeoXPreTrainedModel,GPTNeoXTokenizer:()=>a.GPTNeoXTokenizer,GemmaTokenizer:()=>a.GemmaTokenizer,Grok1Tokenizer:()=>a.Grok1Tokenizer,HerbertTokenizer:()=>a.HerbertTokenizer,HubertForCTC:()=>r.HubertForCTC,HubertForSequenceClassification:()=>r.HubertForSequenceClassification,HubertModel:()=>r.HubertModel,HubertPreTrainedModel:()=>r.HubertPreTrainedModel,ImageClassificationPipeline:()=>o.ImageClassificationPipeline,ImageFeatureExtractionPipeline:()=>o.ImageFeatureExtractionPipeline,ImageFeatureExtractor:()=>i.ImageFeatureExtractor,ImageMattingOutput:()=>r.ImageMattingOutput,ImageSegmentationPipeline:()=>o.ImageSegmentationPipeline,ImageToImagePipeline:()=>o.ImageToImagePipeline,ImageToTextPipeline:()=>o.ImageToTextPipeline,LlamaForCausalLM:()=>r.LlamaForCausalLM,LlamaModel:()=>r.LlamaModel,LlamaPreTrainedModel:()=>r.LlamaPreTrainedModel,LlamaTokenizer:()=>a.LlamaTokenizer,LongT5ForConditionalGeneration:()=>r.LongT5ForConditionalGeneration,LongT5Model:()=>r.LongT5Model,LongT5PreTrainedModel:()=>r.LongT5PreTrainedModel,M2M100ForConditionalGeneration:()=>r.M2M100ForConditionalGeneration,M2M100Model:()=>r.M2M100Model,M2M100PreTrainedModel:()=>r.M2M100PreTrainedModel,M2M100Tokenizer:()=>a.M2M100Tokenizer,MBart50Tokenizer:()=>a.MBart50Tokenizer,MBartForCausalLM:()=>r.MBartForCausalLM,MBartForConditionalGeneration:()=>r.MBartForConditionalGeneration,MBartForSequenceClassification:()=>r.MBartF
orSequenceClassification,MBartModel:()=>r.MBartModel,MBartPreTrainedModel:()=>r.MBartPreTrainedModel,MBartTokenizer:()=>a.MBartTokenizer,MPNetForMaskedLM:()=>r.MPNetForMaskedLM,MPNetForQuestionAnswering:()=>r.MPNetForQuestionAnswering,MPNetForSequenceClassification:()=>r.MPNetForSequenceClassification,MPNetForTokenClassification:()=>r.MPNetForTokenClassification,MPNetModel:()=>r.MPNetModel,MPNetPreTrainedModel:()=>r.MPNetPreTrainedModel,MPNetTokenizer:()=>a.MPNetTokenizer,MT5ForConditionalGeneration:()=>r.MT5ForConditionalGeneration,MT5Model:()=>r.MT5Model,MT5PreTrainedModel:()=>r.MT5PreTrainedModel,MarianMTModel:()=>r.MarianMTModel,MarianModel:()=>r.MarianModel,MarianPreTrainedModel:()=>r.MarianPreTrainedModel,MarianTokenizer:()=>a.MarianTokenizer,MaskedLMOutput:()=>r.MaskedLMOutput,MistralForCausalLM:()=>r.MistralForCausalLM,MistralModel:()=>r.MistralModel,MistralPreTrainedModel:()=>r.MistralPreTrainedModel,MobileBertForMaskedLM:()=>r.MobileBertForMaskedLM,MobileBertForQuestionAnswering:()=>r.MobileBertForQuestionAnswering,MobileBertForSequenceClassification:()=>r.MobileBertForSequenceClassification,MobileBertModel:()=>r.MobileBertModel,MobileBertPreTrainedModel:()=>r.MobileBertPreTrainedModel,MobileBertTokenizer:()=>a.MobileBertTokenizer,MobileViTFeatureExtractor:()=>i.MobileViTFeatureExtractor,MobileViTForImageClassification:()=>r.MobileViTForImageClassification,MobileViTModel:()=>r.MobileViTModel,MobileViTPreTrainedModel:()=>r.MobileViTPreTrainedModel,ModelOutput:()=>r.ModelOutput,MptForCausalLM:()=>r.MptForCausalLM,MptModel:()=>r.MptModel,MptPreTrainedModel:()=>r.MptPreTrainedModel,NllbTokenizer:()=>a.NllbTokenizer,NomicBertModel:()=>r.NomicBertModel,NomicBertPreTrainedModel:()=>r.NomicBertPreTrainedModel,NougatImageProcessor:()=>i.NougatImageProcessor,NougatTokenizer:()=>a.NougatTokenizer,OPTForCausalLM:()=>r.OPTForCausalLM,OPTModel:()=>r.OPTModel,OPTPreTrainedModel:()=>r.OPTPreTrainedModel,ObjectDetectionPipeline:()=>o.ObjectDetectionPipeline,OwlViTFeatureEx
tractor:()=>i.OwlViTFeatureExtractor,OwlViTForObjectDetection:()=>r.OwlViTForObjectDetection,OwlViTModel:()=>r.OwlViTModel,OwlViTPreTrainedModel:()=>r.OwlViTPreTrainedModel,OwlViTProcessor:()=>i.OwlViTProcessor,Owlv2ForObjectDetection:()=>r.Owlv2ForObjectDetection,Owlv2ImageProcessor:()=>i.Owlv2ImageProcessor,Owlv2Model:()=>r.Owlv2Model,Owlv2PreTrainedModel:()=>r.Owlv2PreTrainedModel,PhiForCausalLM:()=>r.PhiForCausalLM,PhiModel:()=>r.PhiModel,PhiPreTrainedModel:()=>r.PhiPreTrainedModel,Pipeline:()=>o.Pipeline,PreTrainedModel:()=>r.PreTrainedModel,PreTrainedTokenizer:()=>a.PreTrainedTokenizer,PretrainedConfig:()=>l.PretrainedConfig,PretrainedMixin:()=>r.PretrainedMixin,Processor:()=>i.Processor,QuestionAnsweringModelOutput:()=>r.QuestionAnsweringModelOutput,QuestionAnsweringPipeline:()=>o.QuestionAnsweringPipeline,Qwen2ForCausalLM:()=>r.Qwen2ForCausalLM,Qwen2Model:()=>r.Qwen2Model,Qwen2PreTrainedModel:()=>r.Qwen2PreTrainedModel,Qwen2Tokenizer:()=>a.Qwen2Tokenizer,RawImage:()=>d.RawImage,ResNetForImageClassification:()=>r.ResNetForImageClassification,ResNetModel:()=>r.ResNetModel,ResNetPreTrainedModel:()=>r.ResNetPreTrainedModel,RoFormerForMaskedLM:()=>r.RoFormerForMaskedLM,RoFormerForQuestionAnswering:()=>r.RoFormerForQuestionAnswering,RoFormerForSequenceClassification:()=>r.RoFormerForSequenceClassification,RoFormerForTokenClassification:()=>r.RoFormerForTokenClassification,RoFormerModel:()=>r.RoFormerModel,RoFormerPreTrainedModel:()=>r.RoFormerPreTrainedModel,RoFormerTokenizer:()=>a.RoFormerTokenizer,RobertaForMaskedLM:()=>r.RobertaForMaskedLM,RobertaForQuestionAnswering:()=>r.RobertaForQuestionAnswering,RobertaForSequenceClassification:()=>r.RobertaForSequenceClassification,RobertaForTokenClassification:()=>r.RobertaForTokenClassification,RobertaModel:()=>r.RobertaModel,RobertaPreTrainedModel:()=>r.RobertaPreTrainedModel,RobertaTokenizer:()=>a.RobertaTokenizer,SamImageProcessor:()=>i.SamImageProcessor,SamImageSegmentationOutput:()=>r.SamImageSegmentationOutput,Sam
Model:()=>r.SamModel,SamPreTrainedModel:()=>r.SamPreTrainedModel,SamProcessor:()=>i.SamProcessor,SeamlessM4TFeatureExtractor:()=>i.SeamlessM4TFeatureExtractor,SegformerFeatureExtractor:()=>i.SegformerFeatureExtractor,SegformerForImageClassification:()=>r.SegformerForImageClassification,SegformerForSemanticSegmentation:()=>r.SegformerForSemanticSegmentation,SegformerModel:()=>r.SegformerModel,SegformerPreTrainedModel:()=>r.SegformerPreTrainedModel,Seq2SeqLMOutput:()=>r.Seq2SeqLMOutput,SequenceClassifierOutput:()=>r.SequenceClassifierOutput,SiglipImageProcessor:()=>i.SiglipImageProcessor,SiglipModel:()=>r.SiglipModel,SiglipPreTrainedModel:()=>r.SiglipPreTrainedModel,SiglipTextModel:()=>r.SiglipTextModel,SiglipTokenizer:()=>a.SiglipTokenizer,SiglipVisionModel:()=>r.SiglipVisionModel,SpeechT5FeatureExtractor:()=>i.SpeechT5FeatureExtractor,SpeechT5ForSpeechToText:()=>r.SpeechT5ForSpeechToText,SpeechT5ForTextToSpeech:()=>r.SpeechT5ForTextToSpeech,SpeechT5HifiGan:()=>r.SpeechT5HifiGan,SpeechT5Model:()=>r.SpeechT5Model,SpeechT5PreTrainedModel:()=>r.SpeechT5PreTrainedModel,SpeechT5Processor:()=>i.SpeechT5Processor,SpeechT5Tokenizer:()=>a.SpeechT5Tokenizer,SqueezeBertForMaskedLM:()=>r.SqueezeBertForMaskedLM,SqueezeBertForQuestionAnswering:()=>r.SqueezeBertForQuestionAnswering,SqueezeBertForSequenceClassification:()=>r.SqueezeBertForSequenceClassification,SqueezeBertModel:()=>r.SqueezeBertModel,SqueezeBertPreTrainedModel:()=>r.SqueezeBertPreTrainedModel,SqueezeBertTokenizer:()=>a.SqueezeBertTokenizer,StableLmForCausalLM:()=>r.StableLmForCausalLM,StableLmModel:()=>r.StableLmModel,StableLmPreTrainedModel:()=>r.StableLmPreTrainedModel,Starcoder2ForCausalLM:()=>r.Starcoder2ForCausalLM,Starcoder2Model:()=>r.Starcoder2Model,Starcoder2PreTrainedModel:()=>r.Starcoder2PreTrainedModel,SummarizationPipeline:()=>o.SummarizationPipeline,Swin2SRForImageSuperResolution:()=>r.Swin2SRForImageSuperResolution,Swin2SRImageProcessor:()=>i.Swin2SRImageProcessor,Swin2SRModel:()=>r.Swin2SRModel,Swin2
SRPreTrainedModel:()=>r.Swin2SRPreTrainedModel,SwinForImageClassification:()=>r.SwinForImageClassification,SwinModel:()=>r.SwinModel,SwinPreTrainedModel:()=>r.SwinPreTrainedModel,T5ForConditionalGeneration:()=>r.T5ForConditionalGeneration,T5Model:()=>r.T5Model,T5PreTrainedModel:()=>r.T5PreTrainedModel,T5Tokenizer:()=>a.T5Tokenizer,TableTransformerForObjectDetection:()=>r.TableTransformerForObjectDetection,TableTransformerModel:()=>r.TableTransformerModel,TableTransformerObjectDetectionOutput:()=>r.TableTransformerObjectDetectionOutput,TableTransformerPreTrainedModel:()=>r.TableTransformerPreTrainedModel,Tensor:()=>u.Tensor,Text2TextGenerationPipeline:()=>o.Text2TextGenerationPipeline,TextClassificationPipeline:()=>o.TextClassificationPipeline,TextGenerationPipeline:()=>o.TextGenerationPipeline,TextToAudioPipeline:()=>o.TextToAudioPipeline,TokenClassificationPipeline:()=>o.TokenClassificationPipeline,TokenClassifierOutput:()=>r.TokenClassifierOutput,TokenizerModel:()=>a.TokenizerModel,TrOCRForCausalLM:()=>r.TrOCRForCausalLM,TrOCRPreTrainedModel:()=>r.TrOCRPreTrainedModel,TranslationPipeline:()=>o.TranslationPipeline,UniSpeechForCTC:()=>r.UniSpeechForCTC,UniSpeechForSequenceClassification:()=>r.UniSpeechForSequenceClassification,UniSpeechModel:()=>r.UniSpeechModel,UniSpeechPreTrainedModel:()=>r.UniSpeechPreTrainedModel,UniSpeechSatForAudioFrameClassification:()=>r.UniSpeechSatForAudioFrameClassification,UniSpeechSatForCTC:()=>r.UniSpeechSatForCTC,UniSpeechSatForSequenceClassification:()=>r.UniSpeechSatForSequenceClassification,UniSpeechSatModel:()=>r.UniSpeechSatModel,UniSpeechSatPreTrainedModel:()=>r.UniSpeechSatPreTrainedModel,ViTFeatureExtractor:()=>i.ViTFeatureExtractor,ViTForImageClassification:()=>r.ViTForImageClassification,ViTImageProcessor:()=>i.ViTImageProcessor,ViTModel:()=>r.ViTModel,ViTPreTrainedModel:()=>r.ViTPreTrainedModel,VisionEncoderDecoderModel:()=>r.VisionEncoderDecoderModel,VitMatteForImageMatting:()=>r.VitMatteForImageMatting,VitMatteImageProces
sor:()=>i.VitMatteImageProcessor,VitMattePreTrainedModel:()=>r.VitMattePreTrainedModel,VitsModel:()=>r.VitsModel,VitsModelOutput:()=>r.VitsModelOutput,VitsPreTrainedModel:()=>r.VitsPreTrainedModel,VitsTokenizer:()=>a.VitsTokenizer,Wav2Vec2BertForCTC:()=>r.Wav2Vec2BertForCTC,Wav2Vec2BertForSequenceClassification:()=>r.Wav2Vec2BertForSequenceClassification,Wav2Vec2BertModel:()=>r.Wav2Vec2BertModel,Wav2Vec2BertPreTrainedModel:()=>r.Wav2Vec2BertPreTrainedModel,Wav2Vec2CTCTokenizer:()=>a.Wav2Vec2CTCTokenizer,Wav2Vec2FeatureExtractor:()=>i.Wav2Vec2FeatureExtractor,Wav2Vec2ForAudioFrameClassification:()=>r.Wav2Vec2ForAudioFrameClassification,Wav2Vec2ForCTC:()=>r.Wav2Vec2ForCTC,Wav2Vec2ForSequenceClassification:()=>r.Wav2Vec2ForSequenceClassification,Wav2Vec2Model:()=>r.Wav2Vec2Model,Wav2Vec2PreTrainedModel:()=>r.Wav2Vec2PreTrainedModel,Wav2Vec2ProcessorWithLM:()=>i.Wav2Vec2ProcessorWithLM,WavLMForAudioFrameClassification:()=>r.WavLMForAudioFrameClassification,WavLMForCTC:()=>r.WavLMForCTC,WavLMForSequenceClassification:()=>r.WavLMForSequenceClassification,WavLMForXVector:()=>r.WavLMForXVector,WavLMModel:()=>r.WavLMModel,WavLMPreTrainedModel:()=>r.WavLMPreTrainedModel,WhisperFeatureExtractor:()=>i.WhisperFeatureExtractor,WhisperForConditionalGeneration:()=>r.WhisperForConditionalGeneration,WhisperModel:()=>r.WhisperModel,WhisperPreTrainedModel:()=>r.WhisperPreTrainedModel,WhisperProcessor:()=>i.WhisperProcessor,WhisperTokenizer:()=>a.WhisperTokenizer,XLMForQuestionAnswering:()=>r.XLMForQuestionAnswering,XLMForSequenceClassification:()=>r.XLMForSequenceClassification,XLMForTokenClassification:()=>r.XLMForTokenClassification,XLMModel:()=>r.XLMModel,XLMPreTrainedModel:()=>r.XLMPreTrainedModel,XLMRobertaForMaskedLM:()=>r.XLMRobertaForMaskedLM,XLMRobertaForQuestionAnswering:()=>r.XLMRobertaForQuestionAnswering,XLMRobertaForSequenceClassification:()=>r.XLMRobertaForSequenceClassification,XLMRobertaForTokenClassification:()=>r.XLMRobertaForTokenClassification,XLMRobertaModel:()=>r
.XLMRobertaModel,XLMRobertaPreTrainedModel:()=>r.XLMRobertaPreTrainedModel,XLMRobertaTokenizer:()=>a.XLMRobertaTokenizer,XLMTokenizer:()=>a.XLMTokenizer,XLMWithLMHeadModel:()=>r.XLMWithLMHeadModel,XVectorOutput:()=>r.XVectorOutput,YolosFeatureExtractor:()=>i.YolosFeatureExtractor,YolosForObjectDetection:()=>r.YolosForObjectDetection,YolosModel:()=>r.YolosModel,YolosObjectDetectionOutput:()=>r.YolosObjectDetectionOutput,YolosPreTrainedModel:()=>r.YolosPreTrainedModel,ZeroShotAudioClassificationPipeline:()=>o.ZeroShotAudioClassificationPipeline,ZeroShotClassificationPipeline:()=>o.ZeroShotClassificationPipeline,ZeroShotImageClassificationPipeline:()=>o.ZeroShotImageClassificationPipeline,ZeroShotObjectDetectionPipeline:()=>o.ZeroShotObjectDetectionPipeline,bankers_round:()=>h.bankers_round,cat:()=>u.cat,cos_sim:()=>h.cos_sim,dot:()=>h.dot,dynamicTimeWarping:()=>u.dynamicTimeWarping,env:()=>n.env,getTopItems:()=>h.getTopItems,hanning:()=>c.hanning,interpolate:()=>u.interpolate,interpolate_data:()=>h.interpolate_data,layer_norm:()=>u.layer_norm,log_softmax:()=>h.log_softmax,magnitude:()=>h.magnitude,max:()=>h.max,mean:()=>u.mean,mean_pooling:()=>u.mean_pooling,medianFilter:()=>h.medianFilter,mel_filter_bank:()=>c.mel_filter_bank,min:()=>h.min,ones:()=>u.ones,ones_like:()=>u.ones_like,permute:()=>u.permute,permute_data:()=>h.permute_data,pipeline:()=>o.pipeline,read_audio:()=>c.read_audio,round:()=>h.round,softmax:()=>h.softmax,spectrogram:()=>c.spectrogram,stack:()=>u.stack,std_mean:()=>u.std_mean,window_function:()=>c.window_function});var o=s(/*! ./pipelines.js */"./src/pipelines.js"),n=s(/*! ./env.js */"./src/env.js"),r=s(/*! ./models.js */"./src/models.js"),a=s(/*! ./tokenizers.js */"./src/tokenizers.js"),i=s(/*! ./processors.js */"./src/processors.js"),l=s(/*! ./configs.js */"./src/configs.js"),c=s(/*! ./utils/audio.js */"./src/utils/audio.js"),d=s(/*! ./utils/image.js */"./src/utils/image.js"),u=s(/*! ./utils/tensor.js */"./src/utils/tensor.js"),h=s(/*! 
./utils/maths.js */"./src/utils/maths.js")},"./src/utils/audio.js":
/*!****************************!*\
!*** ./src/utils/audio.js ***!
\****************************/(e,t,s)=>{s.r(t),s.d(t,{hanning:()=>i,mel_filter_bank:()=>h,read_audio:()=>a,spectrogram:()=>_,window_function:()=>m});var o=s(/*! ./hub.js */"./src/utils/hub.js"),n=s(/*! ./maths.js */"./src/utils/maths.js"),r=s(/*! ./core.js */"./src/utils/core.js");async function a(e,t){if("undefined"==typeof AudioContext)throw Error("Unable to load audio from path/URL since `AudioContext` is not available in your environment. Instead, audio data should be passed directly to the pipeline/processor. For more information and some example code, see https://huggingface.co/docs/transformers.js/guides/node-audio-processing.");const s=await(await(0,o.getFile)(e)).arrayBuffer(),n=new AudioContext({sampleRate:t});void 0===t&&console.warn(`No sampling rate provided, using default of ${n.sampleRate}Hz.`);const r=await n.decodeAudioData(s);let a;if(2===r.numberOfChannels){const e=Math.sqrt(2),t=r.getChannelData(0),s=r.getChannelData(1);a=new Float32Array(t.length);for(let o=0;o<r.length;++o)a[o]=e*(t[o]+s[o])/2}else a=r.getChannelData(0);return a}function i(e){if(e<1)return new Float64Array;if(1===e)return new Float64Array([1]);const t=e-1,s=Math.PI/t,o=new Float64Array(e);for(let n=0;n<e;++n){const e=2*n-t;o[n]=.5+.5*Math.cos(s*e)}return o}const l={htk:e=>2595*Math.log10(1+e/700),kaldi:e=>1127*Math.log(1+e/700),slaney:(e,t=1e3,s=15,o=27/Math.log(6.4))=>e>=t?s+Math.log(e/t)*o:3*e/200};function c(e,t="htk"){const s=l[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}const d={htk:e=>700*(10**(e/2595)-1),kaldi:e=>700*(Math.exp(e/1127)-1),slaney:(e,t=1e3,s=15,o=Math.log(6.4)/27)=>e>=s?t*Math.exp(o*(e-s)):200*e/3};function u(e,t,s){const o=(t-e)/(s-1);return Float64Array.from({length:s},((t,s)=>e+o*s))}function h(e,t,s,o,n,r=null,a="htk",i=!1){if(null!==r&&"slaney"!==r)throw new Error('norm must be one of null or "slaney"');const l=u(c(s,a),c(o,a),t+2);let h,p=function(e,t="htk"){const 
s=d[t];if(!s)throw new Error('mel_scale should be one of "htk", "slaney" or "kaldi".');return"number"==typeof e?s(e):e.map((e=>s(e)))}(l,a);if(i){const t=n/(2*e);h=c(Float64Array.from({length:e},((e,s)=>s*t)),a),p=l}else h=u(0,Math.floor(n/2),e);const _=function(e,t){const s=Float64Array.from({length:t.length-1},((e,s)=>t[s+1]-t[s])),o=Array.from({length:e.length},(()=>new Array(t.length)));for(let s=0;s<e.length;++s){const n=o[s];for(let o=0;o<t.length;++o)n[o]=t[o]-e[s]}const n=t.length-2,r=Array.from({length:n},(()=>new Array(e.length)));for(let t=0;t<e.length;++t){const e=o[t];for(let o=0;o<n;++o){const n=-e[o]/s[o],a=e[o+2]/s[o+1];r[o][t]=Math.max(0,Math.min(n,a))}}return r}(h,p);if(null!==r&&"slaney"===r)for(let s=0;s<t;++s){const t=_[s],o=2/(p[s+2]-p[s]);for(let s=0;s<e;++s)t[s]*=o}return _}function p(e,t,s,o,r){if(s<=0)throw new Error("reference must be greater than zero");if(o<=0)throw new Error("min_value must be greater than zero");s=Math.max(o,s);const a=Math.log10(s);for(let s=0;s<e.length;++s)e[s]=t*Math.log10(Math.max(o,e[s])-a);if(null!==r){if(r<=0)throw new Error("db_range must be greater than zero");const t=(0,n.max)(e)[0]-r;for(let s=0;s<e.length;++s)e[s]=Math.max(e[s],t)}return e}function _(e,t,s,o,{fft_length:a=null,power:i=1,center:l=!0,pad_mode:c="reflect",onesided:d=!0,preemphasis:u=null,mel_filters:h=null,mel_floor:_=1e-10,log_mel:m=null,reference:f=1,min_value:g=1e-10,db_range:M=null,remove_dc_offset:w=null,max_num_frames:T=null,do_pad:k=!0,transpose:b=!1}={}){const x=t.length;if(null===a&&(a=s),s>a)throw Error(`frame_length (${s}) may not be larger than fft_length (${a})`);if(x!==s)throw new Error(`Length of the window (${x}) must equal frame_length (${s})`);if(o<=0)throw new Error("hop_length must be greater than zero");if(l){if("reflect"!==c)throw new Error(`pad_mode="${c}" not implemented yet.`);const t=Math.floor((a-1)/2)+1;e=function(e,t,s){const o=new e.constructor(e.length+t+s),n=e.length-1;for(let 
s=0;s<e.length;++s)o[t+s]=e[s];for(let s=1;s<=t;++s)o[t-s]=e[(0,r.calculateReflectOffset)(s,n)];for(let a=1;a<=s;++a)o[n+t+a]=e[(0,r.calculateReflectOffset)(n-a,n)];return o}(e,t,t)}const y=Math.floor(1+Math.floor((e.length-s)/o)),F=d?Math.floor(a/2)+1:a;let C=y,P=y;null!==T&&(T>y?k&&(P=T):P=C=T);const v=new n.FFT(a),S=new Float64Array(a),A=new Float64Array(v.outputBufferSize),L=new Array(C);for(let n=0;n<C;++n){const r=n*o;for(let t=0;t<s;++t)S[t]=e[r+t];if(w){let e=0;for(let t=0;t<s;++t)e+=S[t];const t=e/s;for(let e=0;e<s;++e)S[e]-=t}if(null!==u){for(let e=s-1;e>=1;--e)S[e]-=u*S[e-1];S[0]*=1-u}for(let e=0;e<t.length;++e)S[e]*=t[e];v.realTransform(A,S);const a=new Array(F);for(let e=0;e<a.length;++e){const t=e<<1;a[e]=A[t]**2+A[t+1]**2}L[n]=a}if(null!==i&&2!==i){const e=2/i;for(let t=0;t<L.length;++t){const s=L[t];for(let t=0;t<s.length;++t)s[t]**=e}}const E=h.length,z=new Float32Array(E*P),B=b?[P,E]:[E,P];for(let e=0;e<E;++e){const t=h[e];for(let s=0;s<C;++s){const o=L[s];let n=0;for(let e=0;e<F;++e)n+=t[e]*o[e];z[b?s*E+e:e*C+s]=Math.max(_,n)}}if(null!==i&&null!==m){const e=Math.min(z.length,C*E);switch(m){case"log":for(let t=0;t<e;++t)z[t]=Math.log(z[t]);break;case"log10":for(let t=0;t<e;++t)z[t]=Math.log10(z[t]);break;case"dB":if(1===i)!function(e,t=1,s=1e-5,o=null){p(e,20,t,s,o)}(z,f,g,M);else{if(2!==i)throw new Error(`Cannot use log_mel option '${m}' with power ${i}`);!function(e,t=1,s=1e-10,o=null){p(e,10,t,s,o)}(z,f,g,M)}break;default:throw new Error(`log_mel must be one of null, 'log', 'log10' or 'dB'. 
Got '${m}'`)}}return{data:z,dims:B}}function m(e,t,{periodic:s=!0,frame_length:o=null,center:n=!0}={}){const r=s?e+1:e;let a;switch(t){case"boxcar":a=new Float64Array(r).fill(1);break;case"hann":case"hann_window":a=i(r);break;case"povey":a=i(r).map((e=>Math.pow(e,.85)));break;default:throw new Error(`Unknown window type ${t}.`)}if(s&&(a=a.subarray(0,e)),null===o)return a;if(e>o)throw new Error(`Length of the window (${e}) may not be larger than frame_length (${o})`);return a}},"./src/utils/core.js":
/*!***************************!*\
!*** ./src/utils/core.js ***!
\***************************/(e,t,s)=>{function o(e,t){e&&e(t)}function n(e){return Object.fromEntries(Object.entries(e).map((([e,t])=>[t,e])))}function r(e){return e.replace(/[.*+?^${}()|[\]\\]/g,"\\$&")}s.r(t),s.d(t,{Callable:()=>a,calculateDimensions:()=>d,calculateReflectOffset:()=>_,dispatchCallback:()=>o,escapeRegExp:()=>r,exists:()=>c,isIntegralNumber:()=>l,isTypedArray:()=>i,mergeArrays:()=>h,pop:()=>u,product:()=>p,reverseDictionary:()=>n});const a=class{constructor(){let e=function(...t){return e._call(...t)};return Object.setPrototypeOf(e,new.target.prototype)}_call(...e){throw Error("Must implement _call method in subclass")}};function i(e){return"TypedArray"===e?.prototype?.__proto__?.constructor?.name}function l(e){return Number.isInteger(e)||"bigint"==typeof e}function c(e){return null!=e}function d(e){const t=[];let s=e;for(;Array.isArray(s);)t.push(s.length),s=s[0];return t}function u(e,t,s=void 0){const o=e[t];if(void 0!==o)return delete e[t],o;if(void 0===s)throw Error(`Key ${t} does not exist in object.`);return s}function h(...e){return Array.prototype.concat.apply([],e)}function p(...e){return e.reduce(((e,t)=>e.flatMap((e=>t.map((t=>[e,t]))))))}function _(e,t){return Math.abs((e+t)%(2*t)-t)}},"./src/utils/data-structures.js":
/*!**************************************!*\
!*** ./src/utils/data-structures.js ***!
\**************************************/
/* Webpack module "./src/utils/data-structures.js" — PriorityQueue (binary
   heap), CharTrie, and TokenLattice (Viterbi best-path over candidate
   tokenizations) used by the Unigram tokenizer. Minified generated code:
   comments only, code unchanged. */
(e,t,s)=>{s.r(t),s.d(t,{CharTrie:()=>n,PriorityQueue:()=>o,TokenLattice:()=>a});
/* o = PriorityQueue: array-backed binary heap. The comparator decides the
   ordering; the default (e,t)=>e>t yields a max-heap. pop() removes and
   returns the root; replace() swaps the root in O(log n). */
class o{constructor(e=((e,t)=>e>t)){this._heap=[],this._comparator=e}get size(){return this._heap.length}isEmpty(){return 0===this.size}peek(){return this._heap[0]}push(...e){return this.extend(e)}extend(e){for(const t of e)this._heap.push(t),this._siftUp();return this.size}pop(){const e=this.peek(),t=this.size-1;return t>0&&this._swap(0,t),this._heap.pop(),this._siftDown(),e}replace(e){const t=this.peek();return this._heap[0]=e,this._siftDown(),t}_parent(e){return(e+1>>>1)-1}_left(e){return 1+(e<<1)}_right(e){return e+1<<1}_greater(e,t){return this._comparator(this._heap[e],this._heap[t])}_swap(e,t){const s=this._heap[e];this._heap[e]=this._heap[t],this._heap[t]=s}_siftUp(){let e=this.size-1;for(;e>0&&this._greater(e,this._parent(e));)this._swap(e,this._parent(e)),e=this._parent(e)}_siftDown(){let e=0;for(;this._left(e)<this.size&&this._greater(this._left(e),e)||this._right(e)<this.size&&this._greater(this._right(e),e);){const t=this._right(e)<this.size&&this._greater(this._right(e),this._left(e))?this._right(e):this._left(e);this._swap(e,t),e=t}}}
/* n = CharTrie: character trie with incremental insertion; commonPrefixSearch
   yields every stored word that is a prefix of the query string, shortest
   first. */
class n{constructor(){this.root=r.default()}extend(e){for(let t of e)this.push(t)}push(e){let t=this.root;for(let s of e){let e=t.children.get(s);void 0===e&&(e=r.default(),t.children.set(s,e)),t=e}t.isLeaf=!0}*commonPrefixSearch(e){let t=this.root,s="";for(let o=0;o<e.length&&void 0!==t;++o){const n=e[o];s+=n,t=t.children.get(n),void 0!==t&&t.isLeaf&&(yield s)}}}
/* r = trie node: an isLeaf flag plus a Map of child nodes keyed by character. */
class r{constructor(e,t){this.isLeaf=e,this.children=t}static default(){return new r(!1,new Map)}}
/* a = TokenLattice: lattice of candidate tokens over a sentence, bracketed by
   BOS/EOS sentinel nodes; beginNodes[i]/endNodes[i] index nodes by start/end
   character position. */
class a{constructor(e,t,s){this.sentence=e,this.len=e.length,this.bosTokenId=t,this.eosTokenId=s,this.nodes=[],this.beginNodes=Array.from({length:this.len+1},(()=>[])),this.endNodes=Array.from({length:this.len+1},(()=>[]));const o=new i(this.bosTokenId,0,0,0,0),n=new i(this.eosTokenId,1,this.len,0,0);this.nodes.push(o.clone()),this.nodes.push(n.clone()),this.beginNodes[this.len].push(n),this.endNodes[0].push(o)}
/* insert(pos, length, score, tokenId): add one candidate token node. */
insert(e,t,s,o){const n=this.nodes.length,r=new i(o,n,e,t,s);this.beginNodes[e].push(r),this.endNodes[e+t].push(r),this.nodes.push(r)}
/* viterbi(): forward pass picking, for each node, the best-scoring predecessor
   ending where it begins; then backtracks from EOS. Returns the best path of
   nodes, or [] when the sentence cannot be covered. */
viterbi(){const e=this.len;let t=0;for(;t<=e;){if(0==this.beginNodes[t].length)return[];for(let e of this.beginNodes[t]){e.prev=null;let s=0,o=null;for(let n of this.endNodes[t]){const t=n.backtraceScore+e.score;(null===o||t>s)&&(o=n.clone(),s=t)}if(null===o)return[];e.prev=o,e.backtraceScore=s}++t}const s=[],o=this.beginNodes[e][0].prev;if(null===o)return[];let n=o.clone();for(;null!==n.prev;){s.push(n.clone());const e=n.clone();n=e.prev.clone()}return s.reverse(),s}
/* piece/tokens/tokenIds: project the Viterbi path back to substrings or ids. */
piece(e){return this.sentence.slice(e.pos,e.pos+e.length)}tokens(){return this.viterbi().map((e=>this.piece(e)))}tokenIds(){return this.viterbi().map((e=>e.tokenId))}}
/* i = lattice node: tokenId, nodeId, character pos/length, unigram score, plus
   the prev/backtraceScore fields filled in by viterbi(). */
class i{constructor(e,t,s,o,n){this.tokenId=e,this.nodeId=t,this.pos=s,this.length=o,this.score=n,this.prev=null,this.backtraceScore=0}clone(){const e=new i(this.tokenId,this.nodeId,this.pos,this.length,this.score);return e.prev=this.prev,e.backtraceScore=this.backtraceScore,e}}},"./src/utils/generation.js":
/*!*********************************!*\
!*** ./src/utils/generation.js ***!
\*********************************/
/* Webpack module "./src/utils/generation.js" — text-generation utilities:
   logits processors (constraints applied to next-token logits), samplers
   (greedy / multinomial / beam) and GenerationConfig defaults. In every
   _call(e,t) below, e is the sequence of generated token ids so far and t is
   the logits tensor for the next token. Minified generated code: comments
   only, code unchanged. */
(e,t,s)=>{s.r(t),s.d(t,{ForceTokensLogitsProcessor:()=>i,ForcedBOSTokenLogitsProcessor:()=>l,ForcedEOSTokenLogitsProcessor:()=>c,GenerationConfig:()=>g,LogitsProcessor:()=>a,LogitsProcessorList:()=>r,MinLengthLogitsProcessor:()=>_,MinNewTokensLengthLogitsProcessor:()=>m,NoBadWordsLogitsProcessor:()=>f,NoRepeatNGramLogitsProcessor:()=>h,RepetitionPenaltyLogitsProcessor:()=>p,Sampler:()=>M,SuppressTokensAtBeginLogitsProcessor:()=>d,WhisperTimeStampLogitsProcessor:()=>u});s(/*! ./tensor.js */"./src/utils/tensor.js");var o=s(/*! ./core.js */"./src/utils/core.js"),n=s(/*! ./maths.js */"./src/utils/maths.js");
/* r = LogitsProcessorList: callable list; _call applies every processor to the
   logits of each batch entry (processors mutate t.data in place). */
class r extends o.Callable{constructor(){super(),this.processors=[]}push(e){this.processors.push(e)}extend(e){this.processors.push(...e)}_call(e,t){for(let s of t)this.processors.forEach((t=>t(e,s)))}[Symbol.iterator](){return this.processors.values()}}
/* a = LogitsProcessor: abstract base; subclasses implement _call(input_ids, logits). */
class a extends o.Callable{_call(e,t){throw Error("`_call` should be implemented in a subclass")}}
/* i = ForceTokensLogitsProcessor: maps generation-step index -> forced token
   id; at a mapped step all other logits are set to -Infinity. */
class i extends a{constructor(e){super(),this.force_token_map=Object.fromEntries(e??[])}_call(e,t){let s=this.force_token_map[e.length];return(0,o.exists)(s)&&(t.data.fill(-1/0),t.data[s]=0),t}}
/* l = ForcedBOSTokenLogitsProcessor: forces the BOS token as the first
   generated token. */
class l extends a{constructor(e){super(),this.bos_token_id=e}_call(e,t){return 1===e.length&&(t.data.fill(-1/0),t.data[this.bos_token_id]=0),t}}
/* c = ForcedEOSTokenLogitsProcessor.
   NOTE(review): _call is an empty stub — it returns undefined rather than the
   logits, so this processor currently does nothing (unimplemented upstream). */
class c extends a{constructor(e,t){super(),this.max_length=e,this.forced_eos_token_id=t}_call(e,t){}}
/* d = SuppressTokensAtBeginLogitsProcessor: at step begin_index, bans every
   token in begin_suppress_tokens. */
class d extends a{constructor(e,t){super(),this.begin_suppress_tokens=e,this.begin_index=t}_call(e,t){if(e.length===this.begin_index)for(let e of this.begin_suppress_tokens)t.data[e]=-1/0;return t}}
/* u = WhisperTimeStampLogitsProcessor: enforces Whisper's timestamp grammar —
   timestamps come in pairs, the first token after the prompt must be a
   timestamp, an optional cap on the initial timestamp, and if the total
   timestamp probability mass beats every text token, text is banned. */
class u extends a{constructor(e){super(),this.eos_token_id=e.eos_token_id,this.no_timestamps_token_id=e.no_timestamps_token_id,this.timestamp_begin=this.no_timestamps_token_id+1,this.begin_index=(e.forced_decoder_ids||[]).length+2,e.forced_decoder_ids.slice(-1)[0][1]===this.no_timestamps_token_id&&(this.begin_index-=1),this.max_initial_timestamp_index=e.max_initial_timestamp_index}_call(e,t){const s=t.data;if(s[this.no_timestamps_token_id]=-1/0,e.length===this.begin_index-1)return s.fill(-1/0),s[this.timestamp_begin]=0,t;const o=e.slice(this.begin_index),r=o.length>=1&&o[o.length-1]>=this.timestamp_begin,a=o.length<2||o[o.length-2]>=this.timestamp_begin;if(r&&(a?s.subarray(this.timestamp_begin).fill(-1/0):s.subarray(0,this.eos_token_id).fill(-1/0)),e.length===this.begin_index&&null!==this.max_initial_timestamp_index){const e=this.timestamp_begin+this.max_initial_timestamp_index;s.subarray(e+1).fill(-1/0)}const i=(0,n.log_softmax)(s);return Math.log(i.subarray(this.timestamp_begin).map(Math.exp).reduce(((e,t)=>e+t)))>(0,n.max)(i.subarray(0,this.timestamp_begin))[0]&&s.subarray(0,this.timestamp_begin).fill(-1/0),t}}
/* h = NoRepeatNGramLogitsProcessor: bans any token that would repeat an
   n-gram already present in the generated sequence (prefix -> last-token map
   keyed by JSON.stringify of the prefix). */
class h extends a{constructor(e){super(),this.no_repeat_ngram_size=e}getNgrams(e){const t=e.length,s=[];for(let o=0;o<t+1-this.no_repeat_ngram_size;++o){const t=[];for(let s=0;s<this.no_repeat_ngram_size;++s)t.push(e[o+s]);s.push(t)}const o=new Map;for(const e of s){const t=e.slice(0,e.length-1),s=JSON.stringify(t),n=o.get(s)??[];n.push(e[e.length-1]),o.set(s,n)}return o}getGeneratedNgrams(e,t){const s=t.slice(t.length+1-this.no_repeat_ngram_size,t.length);return e.get(JSON.stringify(s))??[]}calcBannedNgramTokens(e){const t=[];if(e.length+1<this.no_repeat_ngram_size)return t;{const t=this.getNgrams(e);return this.getGeneratedNgrams(t,e)}}_call(e,t){const s=this.calcBannedNgramTokens(e);for(const e of s)t.data[e]=-1/0;return t}}
/* p = RepetitionPenaltyLogitsProcessor: for every already-seen token id,
   divides a positive logit by the penalty and multiplies a negative one. */
class p extends a{constructor(e){super(),this.penalty=e}_call(e,t){for(const s of e)t.data[s]<0?t.data[s]*=this.penalty:t.data[s]/=this.penalty;return t}}
/* _ = MinLengthLogitsProcessor: bans EOS until min_length is reached. */
class _ extends a{constructor(e,t){super(),this.min_length=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){if(e.length<this.min_length)for(const e of this.eos_token_id)t.data[e]=-1/0;return t}}
/* m = MinNewTokensLengthLogitsProcessor: like MinLength but counts only the
   tokens generated after the prompt. */
class m extends a{constructor(e,t,s){super(),this.prompt_length_to_skip=e,this.min_new_tokens=t,this.eos_token_id=Array.isArray(s)?s:[s]}_call(e,t){if(e.length-this.prompt_length_to_skip<this.min_new_tokens)for(const e of this.eos_token_id)t.data[e]=-1/0;return t}}
/* f = NoBadWordsLogitsProcessor: bans the final token of each bad-word
   sequence whose preceding tokens match the tail of the generated sequence. */
class f extends a{constructor(e,t){super(),this.bad_words_ids=e,this.eos_token_id=Array.isArray(t)?t:[t]}_call(e,t){for(const s of this.bad_words_ids){let o=!0;for(let t=1;t<=s.length-1&&s.length<e.length;++t)if(s.at(-t-1)!==e.at(-t)){o=!1;break}o&&(t.data[s.at(-1)]=-1/0)}return t}}
/* g = GenerationConfig: bag of generation hyper-parameters with defaults
   mirroring the Hugging Face python GenerationConfig. */
const g=class{constructor(e={}){this.max_length=e.max_length??20,this.max_new_tokens=e.max_new_tokens??null,this.min_length=e.min_length??0,this.min_new_tokens=e.min_new_tokens??null,this.early_stopping=e.early_stopping??!1,this.max_time=e.max_time??null,this.do_sample=e.do_sample??!1,this.num_beams=e.num_beams??1,this.num_beam_groups=e.num_beam_groups??1,this.penalty_alpha=e.penalty_alpha??null,this.use_cache=e.use_cache??!0,this.temperature=e.temperature??1,this.top_k=e.top_k??50,this.top_p=e.top_p??1,this.typical_p=e.typical_p??1,this.epsilon_cutoff=e.epsilon_cutoff??0,this.eta_cutoff=e.eta_cutoff??0,this.diversity_penalty=e.diversity_penalty??0,this.repetition_penalty=e.repetition_penalty??1,this.encoder_repetition_penalty=e.encoder_repetition_penalty??1,this.length_penalty=e.length_penalty??1,this.no_repeat_ngram_size=e.no_repeat_ngram_size??0,this.bad_words_ids=e.bad_words_ids??null,this.force_words_ids=e.force_words_ids??null,this.renormalize_logits=e.renormalize_logits??!1,this.constraints=e.constraints??null,this.forced_bos_token_id=e.forced_bos_token_id??null,this.forced_eos_token_id=e.forced_eos_token_id??null,this.remove_invalid_values=e.remove_invalid_values??!1,this.exponential_decay_length_penalty=e.exponential_decay_length_penalty??null,this.suppress_tokens=e.suppress_tokens??null,this.begin_suppress_tokens=e.begin_suppress_tokens??null,this.forced_decoder_ids=e.forced_decoder_ids??null,this.num_return_sequences=e.num_return_sequences??1,this.output_attentions=e.output_attentions??!1,this.output_hidden_states=e.output_hidden_states??!1,this.output_scores=e.output_scores??!1,this.return_dict_in_generate=e.return_dict_in_generate??!1,this.pad_token_id=e.pad_token_id??null,this.bos_token_id=e.bos_token_id??null,this.eos_token_id=e.eos_token_id??null,this.encoder_no_repeat_ngram_size=e.encoder_no_repeat_ngram_size??0,this.decoder_start_token_id=e.decoder_start_token_id??null,this.generation_kwargs=e.generation_kwargs??{}}};
/* M = Sampler: callable base. getLogits extracts one position's logits (last
   row for t=-1) and applies temperature; randomSelect draws an index weighted
   by the given probabilities; static getSampler picks the concrete sampler
   from the config. */
class M extends o.Callable{constructor(e){super(),this.generation_config=e}_call(e,t=-1){return this.sample(e,t)}sample(e,t){throw Error("sample should be implemented in subclasses.")}getLogits(e,t){let s=e.dims.at(-1),o=e.data;if(-1===t)o=o.slice(-s);else{let e=t*s;o=o.slice(e,e+s)}return this.generation_config.temperature>0&&(o=o.map((e=>e/this.generation_config.temperature))),o}randomSelect(e){let t=e.reduce(((e,t)=>e+t),0),s=Math.random()*t;for(let t=0;t<e.length;++t)if(s-=e[t],s<=0)return t;return 0}static getSampler(e){if(e.do_sample)return new T(e);if(e.num_beams>1)return new k(e);if(e.num_return_sequences>1)throw Error(`num_return_sequences has to be 1 when doing greedy search, but is ${e.num_return_sequences}.`);return new w(e)}}
/* w = greedy sampler: returns the argmax token with score 0. */
class w extends M{sample(e,t=-1){let s=this.getLogits(e,t);return[[(0,n.max)(s)[1],0]]}}
/* T = multinomial sampler: softmax over the top-k logits, then num_beams
   independent weighted draws; each result is [tokenId, logProb]. */
class T extends M{sample(e,t=-1){let s=e.dims.at(-1);this.generation_config.top_k>0&&(s=Math.min(this.generation_config.top_k,s));const o=this.getLogits(e,t),r=(0,n.getTopItems)(o,s),a=(0,n.softmax)(r.map((e=>e[1])));return Array.from({length:this.generation_config.num_beams},(()=>{const e=this.randomSelect(a);return[r[e][0],Math.log(a[e])]}))}}
/* k = beam-search sampler: deterministically takes the top num_beams tokens
   with their log-probabilities. */
class k extends M{sample(e,t=-1){let s=e.dims.at(-1);this.generation_config.top_k>0&&(s=Math.min(this.generation_config.top_k,s));const o=this.getLogits(e,t),r=(0,n.getTopItems)(o,s),a=(0,n.softmax)(r.map((e=>e[1])));return Array.from({length:this.generation_config.num_beams},((e,t)=>[r[t][0],Math.log(a[t])]))}}},"./src/utils/hub.js":
/*!**************************!*\
!*** ./src/utils/hub.js ***!
\**************************/(e,t,s)=>{s.r(t),s.d(t,{getFile:()=>d,getModelFile:()=>p,getModelJSON:()=>_});var o=s(/*! fs */"?7a2c"),n=s(/*! path */"?a42a"),r=s(/*! stream/web */"?e65c"),a=s(/*! ../env.js */"./src/env.js"),i=s(/*! ./core.js */"./src/utils/core.js");globalThis.ReadableStream||(globalThis.ReadableStream=r.ReadableStream);class l{_CONTENT_TYPE_MAP={txt:"text/plain",html:"text/html",css:"text/css",js:"text/javascript",json:"application/json",png:"image/png",jpg:"image/jpeg",jpeg:"image/jpeg",gif:"image/gif"};constructor(e){if(this.filePath=e,this.headers=new Headers,this.exists=o.existsSync(e),this.exists){this.status=200,this.statusText="OK";let t=o.statSync(e);this.headers.set("content-length",t.size.toString()),this.updateContentType();let s=this;this.body=new ReadableStream({start(e){s.arrayBuffer().then((t=>{e.enqueue(new Uint8Array(t)),e.close()}))}})}else this.status=404,this.statusText="Not Found",this.body=null}updateContentType(){const e=this.filePath.toString().split(".").pop().toLowerCase();this.headers.set("content-type",this._CONTENT_TYPE_MAP[e]??"application/octet-stream")}clone(){let e=new l(this.filePath);return e.exists=this.exists,e.status=this.status,e.statusText=this.statusText,e.headers=new Headers(this.headers),e}async arrayBuffer(){return(await o.promises.readFile(this.filePath)).buffer}async blob(){const e=await o.promises.readFile(this.filePath);return new Blob([e],{type:this.headers.get("content-type")})}async text(){return await o.promises.readFile(this.filePath,"utf8")}async json(){return JSON.parse(await this.text())}}function c(e,t=null){let s;try{s=new URL(e)}catch(e){return!1}return!(t&&!t.includes(s.hostname))&&("http:"===s.protocol||"https:"===s.protocol)}async function d(e){if(a.env.useFS&&!c(e))return new l(e);if("undefined"!=typeof process&&"node"===process?.release?.name){const t=!!process.env?.TESTING_REMOTELY,s=a.env.version,o=new Headers;o.set("User-Agent",`transformers.js/${s}; 
is_ci/${t};`);if(c(e,["huggingface.co","hf.co"])){const e=process.env?.HF_TOKEN??process.env?.HF_ACCESS_TOKEN;e&&o.set("Authorization",`Bearer ${e}`)}return fetch(e,{headers:o})}return fetch(e)}const u={400:"Bad request error occurred while trying to load file",401:"Unauthorized access to file",403:"Forbidden access to file",404:"Could not locate file",408:"Request timeout error occurred while trying to load file",500:"Internal server error error occurred while trying to load file",502:"Bad gateway error occurred while trying to load file",503:"Service unavailable error occurred while trying to load file",504:"Gateway timeout error occurred while trying to load file"};class h{constructor(e){this.path=e}async match(e){let t=n.join(this.path,e),s=new l(t);return s.exists?s:void 0}async put(e,t){const s=Buffer.from(await t.arrayBuffer());let r=n.join(this.path,e);try{await o.promises.mkdir(n.dirname(r),{recursive:!0}),await o.promises.writeFile(r,s)}catch(e){console.warn("An error occurred while writing the file to cache:",e)}}}async function p(e,t,s=!0,o={}){if(!a.env.allowLocalModels){if(o.local_files_only)throw Error("Invalid configuration detected: local models are disabled (`env.allowLocalModels=false`) but you have requested to only use local models (`local_files_only=true`).");if(!a.env.allowRemoteModels)throw Error("Invalid configuration detected: both local and remote models are disabled. 
Fix by setting `env.allowLocalModels` or `env.allowRemoteModels` to `true`.")}let n;if((0,i.dispatchCallback)(o.progress_callback,{status:"initiate",name:e,file:t}),!n&&a.env.useBrowserCache){if("undefined"==typeof caches)throw Error("Browser cache is not available in this environment.");try{n=await caches.open("transformers-cache")}catch(e){console.warn("An error occurred while opening the browser cache:",e)}}if(!n&&a.env.useFSCache&&(n=new h(o.cache_dir??a.env.cacheDir)),!n&&a.env.useCustomCache){if(!a.env.customCache)throw Error("`env.useCustomCache=true`, but `env.customCache` is not defined.");if(!a.env.customCache.match||!a.env.customCache.put)throw new Error("`env.customCache` must be an object which implements the `match` and `put` functions of the Web Cache API. For more information, see https://developer.mozilla.org/en-US/docs/Web/API/Cache");n=a.env.customCache}const r=o.revision??"main";let l,p,_=m(e,t),f=m(a.env.localModelPath,_),g=m(a.env.remoteHost,a.env.remotePathTemplate.replaceAll("{model}",e).replaceAll("{revision}",encodeURIComponent(r)),t),M="main"===r?_:m(e,r,t),w=n instanceof h?M:g,T=!1;n&&(p=await async function(e,...t){for(let s of t)try{let t=await e.match(s);if(t)return t}catch(e){continue}}(n,f,w));const k=void 0!==p;if(void 0===p){if(a.env.allowLocalModels){if(c(_)){if(o.local_files_only)throw new Error(`\`local_files_only=true\`, but attempted to load a remote file from: ${_}.`);if(!a.env.allowRemoteModels)throw new Error(`\`env.allowRemoteModels=false\`, but attempted to load a remote file from: ${_}.`)}else try{p=await d(f),l=f}catch(e){console.warn(`Unable to load from local path "${f}": "${e}"`)}}if(void 0===p||404===p.status){if(o.local_files_only||!a.env.allowRemoteModels){if(s)throw Error(`\`local_files_only=true\` or \`env.allowRemoteModels=false\` and file was not found locally at "${f}".`);return null}if(p=await d(g),200!==p.status)return function(e,t,s){if(!s)return null;const o=u[e]??`Error (${e}) occurred while trying to 
load file`;throw Error(`${o}: "${t}".`)}(p.status,g,s);l=w}T=n&&"undefined"!=typeof Response&&p instanceof Response&&200===p.status}(0,i.dispatchCallback)(o.progress_callback,{status:"download",name:e,file:t});const b={status:"progress",name:e,file:t};let x;return o.progress_callback?k&&"undefined"!=typeof navigator&&/firefox/i.test(navigator.userAgent)?(x=new Uint8Array(await p.arrayBuffer()),(0,i.dispatchCallback)(o.progress_callback,{...b,progress:100,loaded:x.length,total:x.length})):x=await async function(e,t){const s=e.headers.get("Content-Length");null===s&&console.warn("Unable to determine content-length from response headers. Will expand buffer when needed.");let o=parseInt(s??"0"),n=new Uint8Array(o),r=0;const a=e.body.getReader();async function i(){const{done:e,value:s}=await a.read();if(e)return;let l=r+s.length;if(l>o){o=l;let e=new Uint8Array(o);e.set(n),n=e}n.set(s,r),r=l;return t({progress:r/o*100,loaded:r,total:o}),i()}return await i(),n}(p,(e=>{(0,i.dispatchCallback)(o.progress_callback,{...b,...e})})):x=new Uint8Array(await p.arrayBuffer()),T&&l&&void 0===await n.match(l)&&await n.put(l,new Response(x,{headers:p.headers})).catch((e=>{console.warn(`Unable to add response to browser cache: ${e}.`)})),(0,i.dispatchCallback)(o.progress_callback,{status:"done",name:e,file:t}),x}async function _(e,t,s=!0,o={}){let n=await p(e,t,s,o);if(null===n)return{};let r=new TextDecoder("utf-8").decode(n);return JSON.parse(r)}function m(...e){return(e=e.map(((t,s)=>(s&&(t=t.replace(new RegExp("^/"),"")),s!==e.length-1&&(t=t.replace(new RegExp("/$"),"")),t)))).join("/")}},"./src/utils/image.js":
/*!****************************!*\
!*** ./src/utils/image.js ***!
\****************************/(e,t,s)=>{s.r(t),s.d(t,{RawImage:()=>_});var o=s(/*! ./hub.js */"./src/utils/hub.js"),n=s(/*! ../env.js */"./src/env.js"),r=s(/*! ./tensor.js */"./src/utils/tensor.js"),a=s(/*! sharp */"?2b25");const i="undefined"!=typeof self,l=i&&"DedicatedWorkerGlobalScope"===self.constructor.name;let c,d,u;if(i)c=(e,t)=>{if(!self.OffscreenCanvas)throw new Error("OffscreenCanvas not supported by this browser.");return new self.OffscreenCanvas(e,t)},u=self.createImageBitmap,d=self.ImageData;else{if(!a)throw new Error("Unable to load image processing library.");u=async e=>{const t=(await e.metadata()).channels;let{data:s,info:o}=await e.raw().toBuffer({resolveWithObject:!0});const n=new _(new Uint8ClampedArray(s),o.width,o.height,o.channels);return void 0!==t&&t!==o.channels&&n.convert(t),n}}const h={0:"nearest",1:"lanczos",2:"bilinear",3:"bicubic",4:"box",5:"hamming"},p=new Map([["png","image/png"],["jpg","image/jpeg"],["jpeg","image/jpeg"],["gif","image/gif"]]);class _{constructor(e,t,s,o){this.data=e,this.width=t,this.height=s,this.channels=o}get size(){return[this.width,this.height]}static async read(e){if(e instanceof _)return e;if("string"==typeof e||e instanceof URL)return await this.fromURL(e);throw new Error("Unsupported input type: "+typeof e)}static async fromURL(e){let t=await(0,o.getFile)(e);if(200!==t.status)throw new Error(`Unable to read image from "${e}" (${t.status} ${t.statusText})`);let s=await t.blob();return this.fromBlob(s)}static async fromBlob(e){if(i){let t=await u(e);const s=c(t.width,t.height).getContext("2d");return s.drawImage(t,0,0),new this(s.getImageData(0,0,t.width,t.height).data,t.width,t.height,4)}{let t=a(await e.arrayBuffer());return await u(t)}}static fromTensor(e,t="CHW"){if(3!==e.dims.length)throw new Error(`Tensor should have 3 dimensions, but has ${e.dims.length} dimensions.`);if("CHW"===t)e=e.transpose(1,2,0);else if("HWC"!==t)throw new Error(`Unsupported channel format: ${t}`);if(!(e.data instanceof 
Uint8ClampedArray||e.data instanceof Uint8Array))throw new Error(`Unsupported tensor type: ${e.type}`);switch(e.dims[2]){case 1:case 2:case 3:case 4:return new _(e.data,e.dims[1],e.dims[0],e.dims[2]);default:throw new Error(`Unsupported number of channels: ${e.dims[2]}`)}}grayscale(){if(1===this.channels)return this;let e=new Uint8ClampedArray(this.width*this.height*1);switch(this.channels){case 3:case 4:for(let t=0,s=0;t<this.data.length;t+=this.channels){const o=this.data[t],n=this.data[t+1],r=this.data[t+2];e[s++]=Math.round(.2989*o+.587*n+.114*r)}break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,1)}rgb(){if(3===this.channels)return this;let e=new Uint8ClampedArray(this.width*this.height*3);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t];break;case 4:for(let t=0,s=0;t<this.data.length;t+=4)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2];break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,3)}rgba(){if(4===this.channels)return this;let e=new Uint8ClampedArray(this.width*this.height*4);switch(this.channels){case 1:for(let t=0,s=0;t<this.data.length;++t)e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=this.data[t],e[s++]=255;break;case 3:for(let t=0,s=0;t<this.data.length;t+=3)e[s++]=this.data[t],e[s++]=this.data[t+1],e[s++]=this.data[t+2],e[s++]=255;break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this._update(e,this.width,this.height,4)}async resize(e,t,{resample:s=2}={}){let o=h[s]??s;if(i){let s=this.channels,o=this.toCanvas();const n=c(e,t).getContext("2d");return n.drawImage(o,0,0,e,t),new _(n.getImageData(0,0,e,t).data,e,t,4).convert(s)}{let 
s=this.toSharp();switch(o){case"box":case"hamming":"box"!==o&&"hamming"!==o||(console.warn(`Resampling method ${o} is not yet supported. Using bilinear instead.`),o="bilinear");case"nearest":case"bilinear":case"bicubic":s=s.affine([e/this.width,0,0,t/this.height],{interpolator:o});break;case"lanczos":s=s.resize({width:e,height:t,fit:"fill",kernel:"lanczos3"});break;default:throw new Error(`Resampling method ${o} is not supported.`)}return await u(s)}}async pad([e,t,s,o]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.max(s,0),o=Math.max(o,0),0===e&&0===t&&0===s&&0===o)return this;if(i){let n=this.channels,r=this.toCanvas(),a=this.width+e+t,i=this.height+s+o;const l=c(a,i).getContext("2d");return l.drawImage(r,0,0,this.width,this.height,e,s,a,i),new _(l.getImageData(0,0,a,i).data,a,i,4).convert(n)}{let n=this.toSharp().extend({left:e,right:t,top:s,bottom:o});return await u(n)}}async crop([e,t,s,o]){if(e=Math.max(e,0),t=Math.max(t,0),s=Math.min(s,this.width-1),o=Math.min(o,this.height-1),0===e&&0===t&&s===this.width-1&&o===this.height-1)return this;const n=s-e+1,r=o-t+1;if(i){const s=this.channels,o=this.toCanvas(),a=c(n,r).getContext("2d");a.drawImage(o,e,t,n,r,0,0,n,r);return new _(a.getImageData(0,0,n,r).data,n,r,4).convert(s)}{const s=this.toSharp().extract({left:e,top:t,width:n,height:r});return await u(s)}}async center_crop(e,t){if(this.width===e&&this.height===t)return this;let s=(this.width-e)/2,o=(this.height-t)/2;if(i){let n=this.channels,r=this.toCanvas();const a=c(e,t).getContext("2d");let i=0,l=0,d=0,u=0;return s>=0?i=s:d=-s,o>=0?l=o:u=-o,a.drawImage(r,i,l,e,t,d,u,e,t),new _(a.getImageData(0,0,e,t).data,e,t,4).convert(n)}{let n=this.toSharp();if(s>=0&&o>=0)n=n.extract({left:Math.floor(s),top:Math.floor(o),width:e,height:t});else if(s<=0&&o<=0){let r=Math.floor(-o),a=Math.floor(-s);n=n.extend({top:r,left:a,right:e-this.width-a,bottom:t-this.height-r})}else{let r=[0,0],a=0;o<0?(r[0]=Math.floor(-o),r[1]=t-this.height-r[0]):a=Math.floor(o);let 
i=[0,0],l=0;s<0?(i[0]=Math.floor(-s),i[1]=e-this.width-i[0]):l=Math.floor(s),n=n.extend({top:r[0],bottom:r[1],left:i[0],right:i[1]}).extract({left:l,top:a,width:e,height:t})}return await u(n)}}async toBlob(e="image/png",t=1){if(!i)throw new Error("toBlob() is only supported in browser environments.");const s=this.toCanvas();return await s.convertToBlob({type:e,quality:t})}toTensor(e="CHW"){let t=new r.Tensor("uint8",new Uint8Array(this.data),[this.height,this.width,this.channels]);if("HWC"===e);else{if("CHW"!==e)throw new Error(`Unsupported channel format: ${e}`);t=t.permute(2,0,1)}return t}toCanvas(){if(!i)throw new Error("toCanvas() is only supported in browser environments.");let e=this.clone().rgba(),t=c(e.width,e.height),s=new d(e.data,e.width,e.height);return t.getContext("2d").putImageData(s,0,0),t}_update(e,t,s,o=null){return this.data=e,this.width=t,this.height=s,null!==o&&(this.channels=o),this}clone(){return new _(this.data.slice(),this.width,this.height,this.channels)}convert(e){if(this.channels===e)return this;switch(e){case 1:this.grayscale();break;case 3:this.rgb();break;case 4:this.rgba();break;default:throw new Error(`Conversion failed due to unsupported number of channels: ${this.channels}`)}return this}async save(e){if(!i){if(n.env.useFS){const t=this.toSharp();return await t.toFile(e)}throw new Error("Unable to save the image because filesystem is disabled in this environment.")}{if(l)throw new Error("Unable to save an image from a Web Worker.");const t=e.split(".").pop().toLowerCase(),s=p.get(t)??"image/png",o=await this.toBlob(s),n=URL.createObjectURL(o),r=document.createElement("a");r.href=n,r.download=e,r.click(),r.remove()}}toSharp(){if(i)throw new Error("toSharp() is only supported in server-side environments.");return a(this.data,{raw:{width:this.width,height:this.height,channels:this.channels}})}}},"./src/utils/maths.js":
/*!****************************!*\
!*** ./src/utils/maths.js ***!
\****************************/(e,t,s)=>{function o(e,[t,s,o],[n,r],a="bilinear",i=!1){const l=r/o,c=n/s,d=new e.constructor(n*r*t),u=s*o,h=n*r;for(let a=0;a<n;++a)for(let n=0;n<r;++n){const i=a*r+n,p=(n+.5)/l-.5,_=(a+.5)/c-.5;let m=Math.floor(p),f=Math.floor(_);const g=Math.min(m+1,o-1),M=Math.min(f+1,s-1);m=Math.max(m,0),f=Math.max(f,0);const w=p-m,T=_-f,k=(1-w)*(1-T),b=w*(1-T),x=(1-w)*T,y=w*T,F=f*o,C=M*o,P=F+m,v=F+g,S=C+m,A=C+g;for(let s=0;s<t;++s){const t=s*u;d[s*h+i]=k*e[t+P]+b*e[t+v]+x*e[t+S]+y*e[t+A]}}return d}function n(e,t,s){const o=new Array(s.length),n=new Array(s.length);for(let e=s.length-1,r=1;e>=0;--e)n[e]=r,o[e]=t[s[e]],r*=o[e];const r=s.map(((e,t)=>n[s.indexOf(t)])),a=new e.constructor(e.length);for(let s=0;s<e.length;++s){let o=0;for(let e=t.length-1,n=s;e>=0;--e)o+=n%t[e]*r[e],n=Math.floor(n/t[e]);a[o]=e[s]}return[a,o]}function r(e){const t=h(e)[0],s=e.map((e=>Math.exp(e-t))),o=s.reduce(((e,t)=>e+t),0);return s.map((e=>e/o))}function a(e){return r(e).map((e=>Math.log(e)))}function i(e,t){return e.reduce(((e,s,o)=>e+s*t[o]),0)}function l(e,t=0){return e=Array.from(e).map(((e,t)=>[t,e])).sort(((e,t)=>t[1]-e[1])),null!==t&&t>0&&(e=e.slice(0,t)),e}function c(e,t){return i(e,t)/(d(e)*d(t))}function d(e){return Math.sqrt(e.reduce(((e,t)=>e+t*t),0))}function u(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let o=1;o<e.length;++o)e[o]<t&&(t=e[o],s=o);return[t,s]}function h(e){if(0===e.length)throw Error("Array must not be empty");let t=e[0],s=0;for(let o=1;o<e.length;++o)e[o]>t&&(t=e[o],s=o);return[Number(t),s]}function p(e){return e>0&&0==(e&e-1)}s.r(t),s.d(t,{FFT:()=>f,bankers_round:()=>w,cos_sim:()=>c,dot:()=>i,getTopItems:()=>l,interpolate_data:()=>o,log_softmax:()=>a,magnitude:()=>d,max:()=>h,medianFilter:()=>g,min:()=>u,permute_data:()=>n,round:()=>M,softmax:()=>r});class _{constructor(e){if(this.size=0|e,this.size<=1||!p(this.size))throw new Error("FFT size must be a power of two larger than 
1");this._csize=e<<1,this.table=new Float64Array(2*this.size);for(let e=0;e<this.table.length;e+=2){const t=Math.PI*e/this.size;this.table[e]=Math.cos(t),this.table[e+1]=-Math.sin(t)}let t=0;for(let e=1;this.size>e;e<<=1)++t;this._width=t%2==0?t-1:t,this._bitrev=new Int32Array(1<<this._width);for(let e=0;e<this._bitrev.length;++e){this._bitrev[e]=0;for(let t=0;t<this._width;t+=2){const s=this._width-t-2;this._bitrev[e]|=(e>>>t&3)<<s}}}createComplexArray(){return new Float64Array(this._csize)}fromComplexArray(e,t){const s=t||new Array(e.length>>>1);for(let t=0;t<e.length;t+=2)s[t>>>1]=e[t];return s}toComplexArray(e,t){const s=t||this.createComplexArray();for(let t=0;t<s.length;t+=2)s[t]=e[t>>>1],s[t+1]=0;return s}completeSpectrum(e){const t=this._csize,s=t>>>1;for(let o=2;o<s;o+=2)e[t-o]=e[o],e[t-o+1]=-e[o+1]}transform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,1)}realTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._realTransform4(e,t,1)}inverseTransform(e,t){if(e===t)throw new Error("Input and output buffers must be different");this._transform4(e,t,-1);for(let t=0;t<e.length;++t)e[t]/=this.size}_transform4(e,t,s){const o=this._csize;let n,r,a=1<<this._width,i=o/a<<1;const l=this._bitrev;if(4===i)for(n=0,r=0;n<o;n+=i,++r){const s=l[r];this._singleTransform2(t,e,n,s,a)}else for(n=0,r=0;n<o;n+=i,++r){const o=l[r];this._singleTransform4(t,e,n,o,a,s)}for(a>>=2;a>=2;a>>=2){i=o/a<<1;const t=i>>>2;for(n=0;n<o;n+=i){const o=n+t-1;for(let r=n,i=0;r<o;r+=2,i+=a){const 
o=r,n=o+t,a=n+t,l=a+t,c=e[o],d=e[o+1],u=e[n],h=e[n+1],p=e[a],_=e[a+1],m=e[l],f=e[l+1],g=this.table[i],M=s*this.table[i+1],w=u*g-h*M,T=u*M+h*g,k=this.table[2*i],b=s*this.table[2*i+1],x=p*k-_*b,y=p*b+_*k,F=this.table[3*i],C=s*this.table[3*i+1],P=m*F-f*C,v=m*C+f*F,S=c+x,A=d+y,L=c-x,E=d-y,z=w+P,B=T+v,I=s*(w-P),O=s*(T-v);e[o]=S+z,e[o+1]=A+B,e[n]=L+O,e[n+1]=E-I,e[a]=S-z,e[a+1]=A-B,e[l]=L-O,e[l+1]=E+I}}}}_singleTransform2(e,t,s,o,n){const r=e[o],a=e[o+1],i=e[o+n],l=e[o+n+1];t[s]=r+i,t[s+1]=a+l,t[s+2]=r-i,t[s+3]=a-l}_singleTransform4(e,t,s,o,n,r){const a=2*n,i=3*n,l=e[o],c=e[o+1],d=e[o+n],u=e[o+n+1],h=e[o+a],p=e[o+a+1],_=e[o+i],m=e[o+i+1],f=l+h,g=c+p,M=l-h,w=c-p,T=d+_,k=u+m,b=r*(d-_),x=r*(u-m);t[s]=f+T,t[s+1]=g+k,t[s+2]=M+x,t[s+3]=w-b,t[s+4]=f-T,t[s+5]=g-k,t[s+6]=M-x,t[s+7]=w+b}_realTransform4(e,t,s){const o=this._csize;let n,r,a=1<<this._width,i=o/a<<1;const l=this._bitrev;if(4===i)for(n=0,r=0;n<o;n+=i,++r){const s=l[r];this._singleRealTransform2(t,e,n,s>>>1,a>>>1)}else for(n=0,r=0;n<o;n+=i,++r){const o=l[r];this._singleRealTransform4(t,e,n,o>>>1,a>>>1,s)}for(a>>=2;a>=2;a>>=2){i=o/a<<1;const t=i>>>2;for(n=0;n<o;n+=i){const o=n+t-1;for(let r=n,i=0;r<o;r+=2,i+=a){const o=r,n=o+t,a=n+t,l=a+t,c=e[o],d=e[o+1],u=e[n],h=e[n+1],p=e[a],_=e[a+1],m=e[l],f=e[l+1],g=this.table[i],M=s*this.table[i+1],w=u*g-h*M,T=u*M+h*g,k=this.table[2*i],b=s*this.table[2*i+1],x=p*k-_*b,y=p*b+_*k,F=this.table[3*i],C=s*this.table[3*i+1],P=m*F-f*C,v=m*C+f*F,S=c+x,A=d+y,L=c-x,E=d-y,z=w+P,B=T+v,I=s*(w-P),O=s*(T-v);e[o]=S+z,e[o+1]=A+B,e[n]=L+O,e[n+1]=E-I,e[a]=S-z,e[a+1]=A-B,e[l]=L-O,e[l+1]=E+I}}}}_singleRealTransform2(e,t,s,o,n){const r=e[o],a=e[o+n];t[s]=r+a,t[s+1]=0,t[s+2]=r-a,t[s+3]=0}_singleRealTransform4(e,t,s,o,n,r){const a=2*n,i=3*n,l=e[o],c=e[o+n],d=e[o+a],u=e[o+i],h=l+d,p=l-d,_=c+u,m=r*(c-u);t[s]=h+_,t[s+1]=0,t[s+2]=p,t[s+3]=-m,t[s+4]=h-_,t[s+5]=0,t[s+6]=p,t[s+7]=m}}class m{constructor(e){const t=2*(e-1),s=2*(2*e-1),o=2**Math.ceil(Math.log2(s));this.bufferSize=o,this._a=t;const n=new 
Float64Array(s),r=new Float64Array(o);this._chirpBuffer=new Float64Array(o),this._buffer1=new Float64Array(o),this._buffer2=new Float64Array(o),this._outBuffer1=new Float64Array(o),this._outBuffer2=new Float64Array(o);const a=-2*Math.PI/e,i=Math.cos(a),l=Math.sin(a);for(let t=0;t<s>>1;++t){const s=(t+1-e)**2/2,o=Math.sqrt(i**2+l**2)**s,a=s*Math.atan2(l,i),c=2*t;n[c]=o*Math.cos(a),n[c+1]=o*Math.sin(a),r[c]=n[c],r[c+1]=-n[c+1]}this._slicedChirpBuffer=n.subarray(t,s),this._f=new _(o>>1),this._f.transform(this._chirpBuffer,r)}_transform(e,t,s){const o=this._buffer1,n=this._buffer2,r=this._outBuffer1,a=this._outBuffer2,i=this._chirpBuffer,l=this._slicedChirpBuffer,c=this._a;if(s)for(let e=0;e<l.length;e+=2){const s=e+1,n=t[e>>1];o[e]=n*l[e],o[s]=n*l[s]}else for(let e=0;e<l.length;e+=2){const s=e+1;o[e]=t[e]*l[e]-t[s]*l[s],o[s]=t[e]*l[s]+t[s]*l[e]}this._f.transform(r,o);for(let e=0;e<i.length;e+=2){const t=e+1;n[e]=r[e]*i[e]-r[t]*i[t],n[t]=r[e]*i[t]+r[t]*i[e]}this._f.inverseTransform(a,n);for(let t=0;t<a.length;t+=2){const s=a[t+c],o=a[t+c+1],n=l[t],r=l[t+1];e[t]=s*n-o*r,e[t+1]=s*r+o*n}}transform(e,t){this._transform(e,t,!1)}realTransform(e,t){this._transform(e,t,!0)}}class f{constructor(e){this.fft_length=e,this.isPowerOfTwo=p(e),this.isPowerOfTwo?(this.fft=new _(e),this.outputBufferSize=2*e):(this.fft=new m(e),this.outputBufferSize=this.fft.bufferSize)}realTransform(e,t){this.fft.realTransform(e,t)}transform(e,t){this.fft.transform(e,t)}}function g(e,t){if(t%2==0||t<=0)throw new Error("Window size must be a positive odd number");const s=new e.constructor(e.length),o=new e.constructor(t),n=Math.floor(t/2);for(let t=0;t<e.length;++t){let r=0;for(let s=-n;s<=n;++s){let n=t+s;n<0?n=Math.abs(n):n>=e.length&&(n=2*(e.length-1)-n),o[r++]=e[n]}o.sort(),s[t]=o[n]}return s}function M(e,t){const s=Math.pow(10,t);return Math.round(e*s)/s}function w(e){const t=Math.round(e);return Math.abs(e)%1==.5?t%2==0?t:t-1:t}},"./src/utils/tensor.js":
/*!*****************************!*\
!*** ./src/utils/tensor.js ***!
\*****************************/(e,t,s)=>{s.r(t),s.d(t,{Tensor:()=>i,cat:()=>m,dynamicTimeWarping:()=>w,interpolate:()=>c,layer_norm:()=>u,mean:()=>M,mean_pooling:()=>d,ones:()=>T,ones_like:()=>k,permute:()=>l,stack:()=>f,std_mean:()=>g});var o=s(/*! ../backends/onnx.js */"./src/backends/onnx.js"),n=s(/*! ./maths.js */"./src/utils/maths.js");const r=Object.freeze({float32:Float32Array,float64:Float64Array,string:Array,int8:Int8Array,uint8:Uint8Array,int16:Int16Array,uint16:Uint16Array,int32:Int32Array,uint32:Uint32Array,int64:BigInt64Array,uint64:BigUint64Array,bool:Uint8Array}),a=o.ONNX.Tensor;class i{dims;type;data;size;constructor(...e){return e[0]instanceof a?Object.assign(this,e[0]):Object.assign(this,new a(e[0],e[1],e[2])),new Proxy(this,{get:(e,t)=>{if("string"==typeof t){let s=Number(t);if(Number.isInteger(s))return e._getitem(s)}return e[t]},set:(e,t,s)=>e[t]=s})}*[Symbol.iterator](){const[e,...t]=this.dims;if(t.length>0){const s=t.reduce(((e,t)=>e*t));for(let o=0;o<e;++o)yield this._subarray(o,s,t)}else yield*this.data}_getitem(e){const[t,...s]=this.dims;if(e=_(e,t),s.length>0){const t=s.reduce(((e,t)=>e*t));return this._subarray(e,t,s)}return new i(this.type,[this.data[e]],s)}indexOf(e){for(let t=0;t<this.data.length;++t)if(this.data[t]==e)return t;return-1}_subarray(e,t,s){const o=e*t,n=(e+1)*t,r="subarray"in this.data?this.data.subarray(o,n):this.data.slice(o,n);return new i(this.type,r,s)}item(){if(1!==this.data.length)throw new Error(`a Tensor with ${this.data.length} elements cannot be converted to Scalar`);return this.data[0]}tolist(){return function(e,t){const s=e.length,o=t.reduce(((e,t)=>e*t));if(s!==o)throw Error(`cannot reshape array of size ${s} into shape (${t})`);let n=e;for(let e=t.length-1;e>=0;e--)n=n.reduce(((s,o)=>{let n=s[s.length-1];return n.length<t[e]?n.push(o):s.push([o]),s}),[[]]);return n[0]}(this.data,this.dims)}sigmoid(){return this.clone().sigmoid_()}sigmoid_(){for(let 
e=0;e<this.data.length;++e)this.data[e]=1/(1+Math.exp(-this.data[e]));return this}mul(e){return this.clone().mul_(e)}mul_(e){for(let t=0;t<this.data.length;++t)this.data[t]*=e;return this}add(e){return this.clone().add_(e)}add_(e){for(let t=0;t<this.data.length;++t)this.data[t]+=e;return this}clone(){return new i(this.type,this.data.slice(),this.dims.slice())}slice(...e){let t=[],s=[];for(let o=0;o<this.dims.length;++o){let n=e[o];if(null==n)s.push([0,this.dims[o]]),t.push(this.dims[o]);else if("number"==typeof n)n=_(n,this.dims[o],o),s.push([n,n+1]);else{if(!Array.isArray(n)||2!==n.length)throw new Error(`Invalid slice: ${n}`);{if(n[0]>n[1])throw new Error(`Invalid slice: ${n}`);let e=[Math.max(n[0],0),Math.min(n[1],this.dims[o])];s.push(e),t.push(e[1]-e[0])}}}let o=s.map((([e,t])=>t-e)),n=o.reduce(((e,t)=>e*t)),r=new this.data.constructor(n);const a=this.stride();for(let e=0;e<n;++e){let t=0;for(let n=o.length-1,r=e;n>=0;--n){const e=o[n];t+=(r%e+s[n][0])*a[n],r=Math.floor(r/e)}r[e]=this.data[t]}return new i(this.type,r,t)}permute(...e){return l(this,e)}transpose(...e){return this.permute(...e)}sum(e=null,t=!1){return this.norm(1,e,t)}norm(e="fro",t=null,s=!1){if("fro"===e)e=2;else if("string"==typeof e)throw Error(`Unsupported norm: ${e}`);if(null===t){let t=this.data.reduce(((t,s)=>t+s**e),0)**(1/e);return new i(this.type,[t],[])}t=_(t,this.dims.length);const o=this.dims.slice();o[t]=1;const n=new this.data.constructor(this.data.length/this.dims[t]);for(let s=0;s<this.data.length;++s){let r=0;for(let e=this.dims.length-1,n=s,a=1;e>=0;--e){const s=this.dims[e];if(e!==t){r+=n%s*a,a*=o[e]}n=Math.floor(n/s)}n[r]+=this.data[s]**e}if(1!==e)for(let t=0;t<n.length;++t)n[t]=n[t]**(1/e);return s||o.splice(t,1),new i(this.type,n,o)}normalize_(e=2,t=1){t=_(t,this.dims.length);const s=this.norm(e,t,!0);for(let e=0;e<this.data.length;++e){let o=0;for(let s=this.dims.length-1,n=e,r=1;s>=0;--s){const 
e=this.dims[s];if(s!==t){o+=n%e*r,r*=this.dims[s]}n=Math.floor(n/e)}this.data[e]/=s.data[o]}return this}normalize(e=2,t=1){return this.clone().normalize_(e,t)}stride(){return function(e){const t=new Array(e.length);for(let s=e.length-1,o=1;s>=0;--s)t[s]=o,o*=e[s];return t}(this.dims)}squeeze(e=null){return new i(this.type,this.data,h(this.dims,e))}squeeze_(e=null){return this.dims=h(this.dims,e),this}unsqueeze(e=null){return new i(this.type,this.data,p(this.dims,e))}unsqueeze_(e=null){return this.dims=p(this.dims,e),this}flatten_(e=0,t=-1){t=(t+this.dims.length)%this.dims.length;let s=this.dims.slice(0,e),o=this.dims.slice(e,t+1),n=this.dims.slice(t+1);return this.dims=[...s,o.reduce(((e,t)=>e*t),1),...n],this}flatten(e=0,t=-1){return this.clone().flatten_(e,t)}view(...e){let t=-1;for(let s=0;s<e.length;++s)if(-1===e[s]){if(-1!==t)throw new Error("Only one dimension can be inferred");t=s}if(-1!==t){const s=e.reduce(((e,s,o)=>o!==t?e*s:e),1);e[t]=this.data.length/s}return new i(this.type,this.data,e)}neg_(){for(let e=0;e<this.data.length;++e)this.data[e]=-this.data[e];return this}neg(){return this.clone().neg_()}clamp_(e,t){for(let s=0;s<this.data.length;++s)this.data[s]=Math.min(Math.max(this.data[s],e),t);return this}clamp(e,t){return this.clone().clamp_(e,t)}round_(){for(let e=0;e<this.data.length;++e)this.data[e]=Math.round(this.data[e]);return this}round(){return this.clone().round_()}to(e){if(this.type===e)return this;if(!r.hasOwnProperty(e))throw new Error(`Unsupported type: ${e}`);return new i(e,r[e].from(this.data),this.dims)}}function l(e,t){const[s,o]=(0,n.permute_data)(e.data,e.dims,t);return new i(e.type,s,o)}function c(e,[t,s],o="bilinear",r=!1){const a=e.dims.at(-3)??1,l=e.dims.at(-2),c=e.dims.at(-1);let d=(0,n.interpolate_data)(e.data,[a,l,c],[t,s],o,r);return new i(e.type,d,[a,t,s])}function d(e,t){let s=[e.dims[0],e.dims[2]],o=new e.data.constructor(s[0]*s[1]),[n,r,a]=e.dims,l=0;for(let s=0;s<n;++s){let n=s*a*r;for(let i=0;i<a;++i){let 
c=0,d=0,u=s*r,h=n+i;for(let s=0;s<r;++s){let o=Number(t.data[u+s]);d+=o,c+=e.data[h+s*a]*o}let p=c/d;o[l++]=p}}return new i(e.type,o,s)}function u(e,t,{eps:s=1e-5}={}){if(2!==e.dims.length)throw new Error("`layer_norm` currently only supports 2D input.");const[o,n]=e.dims;if(1!==t.length&&t[0]!==n)throw new Error("`normalized_shape` must be a 1D array with shape `[input.dims[1]]`.");const[r,a]=g(e,1,0,!0),l=new e.data.constructor(e.data.length);for(let t=0;t<o;++t){const o=t*n;for(let i=0;i<n;++i){const n=o+i;l[n]=(e.data[n]-a.data[t])/(r.data[t]+s)}}return new i(e.type,l,e.dims)}function h(e,t){return e=e.slice(),null===t?e=e.filter((e=>1!==e)):"number"==typeof t?1===e[t]&&e.splice(t,1):Array.isArray(t)&&(e=e.filter(((e,s)=>1!==e||!t.includes(s)))),e}function p(e,t){return t=_(t,e.length+1),(e=e.slice()).splice(t,0,1),e}function _(e,t,s=null){if(e<-t||e>=t)throw new Error(`IndexError: index ${e} is out of bounds for dimension${null===s?"":" "+s} with size ${t}`);return e<0&&(e=(e%t+t)%t),e}function m(e,t=0){t=_(t,e[0].dims.length);const s=e[0].dims.slice();s[t]=e.reduce(((e,s)=>e+s.dims[t]),0);const o=s.reduce(((e,t)=>e*t),1),n=new e[0].data.constructor(o),r=e[0].type;if(0===t){let t=0;for(let s of e)n.set(s.data,t),t+=s.data.length}else{let o=0;for(let r=0;r<e.length;++r){let a=e[r];for(let e=0;e<a.data.length;++e){let r=0;for(let n=a.dims.length-1,i=e,l=1;n>=0;--n){const e=a.dims[n];let c=i%e;n===t&&(c+=o),r+=c*l,l*=s[n],i=Math.floor(i/e)}n[r]=a.data[e]}o+=a.dims[t]}}return new i(r,n,s)}function f(e,t=0){return m(e.map((e=>e.unsqueeze(t))),t)}function g(e,t=null,s=1,o=!1){if(null===t){const t=e.data.reduce(((e,t)=>e+t),0)/e.data.length,o=Math.sqrt(e.data.reduce(((e,s)=>e+(s-t)**2),0)/(e.data.length-s)),n=new i(e.type,[t],[]);return[new i(e.type,[o],[]),n]}const n=M(e,t=_(t,e.dims.length),o),r=e.dims.slice();r[t]=1;const a=new e.data.constructor(e.data.length/e.dims[t]);for(let s=0;s<e.data.length;++s){let o=0;for(let n=e.dims.length-1,a=s,i=1;n>=0;--n){const 
s=e.dims[n];if(n!==t){o+=a%s*i,i*=r[n]}a=Math.floor(a/s)}a[o]+=(e.data[s]-n.data[o])**2}for(let o=0;o<a.length;++o)a[o]=Math.sqrt(a[o]/(e.dims[t]-s));o||r.splice(t,1);return[new i(e.type,a,r),n]}function M(e,t=null,s=!1){if(null===t){let t=e.data.reduce(((e,t)=>e+t),0);return new i(e.type,[t/e.data.length],[])}t=_(t,e.dims.length);const o=e.dims.slice();o[t]=1;const n=new e.data.constructor(e.data.length/e.dims[t]);for(let s=0;s<e.data.length;++s){let r=0;for(let n=e.dims.length-1,a=s,i=1;n>=0;--n){const s=e.dims[n];if(n!==t){r+=a%s*i,i*=o[n]}a=Math.floor(a/s)}n[r]+=e.data[s]}if(1!==e.dims[t])for(let s=0;s<n.length;++s)n[s]=n[s]/e.dims[t];return s||o.splice(t,1),new i(e.type,n,o)}function w(e){const[t,s]=e.dims,o=[t+1,s+1],n=new i("float32",new Float32Array(o[0]*o[1]).fill(1/0),o),r=new i("float32",new Float32Array(o[0]*o[1]).fill(-1),o);n[0].data[0]=0;for(let o=1;o<s+1;++o)for(let s=1;s<t+1;++s){const t=n[s-1][o-1].item(),a=n[s-1][o].item(),i=n[s][o-1].item();let l,c;t<a&&t<i?(l=t,c=0):a<t&&a<i?(l=a,c=1):(l=i,c=2),n[s].data[o]=e[s-1][o-1].item()+l,r[s].data[o]=c}let a=t,l=s;r.data.fill(2,0,o[1]);for(let e=0;e<o[0];++e)r[e].data[0]=1;let c=[],d=[];for(;a>0||l>0;){c.push(a-1),d.push(l-1);switch(r[a][l].item()){case 0:--a,--l;break;case 1:--a;break;case 2:--l;break;default:throw new Error(`Internal error in dynamic time warping. Unexpected trace[${a}, ${l}]. 
Please file a bug report.`)}}return c.reverse(),d.reverse(),[c,d]}function T(e){const t=e.reduce(((e,t)=>e*t),1);return new i("int64",new BigInt64Array(t).fill(1n),e)}function k(e){return T(e.dims)}}},s={};function o(e){var n=s[e];if(void 0!==n)return n.exports;var r=s[e]={exports:{}};return t[e](r,r.exports,o),r.exports}o.d=(e,t)=>{for(var s in t)o.o(t,s)&&!o.o(e,s)&&Object.defineProperty(e,s,{enumerable:!0,get:t[s]})},o.o=(e,t)=>Object.prototype.hasOwnProperty.call(e,t),o.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})};var n=o("./src/transformers.js"),r=n.ASTFeatureExtractor,a=n.ASTForAudioClassification,i=n.ASTModel,l=n.ASTPreTrainedModel,c=n.AlbertForMaskedLM,d=n.AlbertForQuestionAnswering,u=n.AlbertForSequenceClassification,h=n.AlbertModel,p=n.AlbertPreTrainedModel,_=n.AlbertTokenizer,m=n.AudioClassificationPipeline,f=n.AutoConfig,g=n.AutoModel,M=n.AutoModelForAudioClassification,w=n.AutoModelForAudioFrameClassification,T=n.AutoModelForCTC,k=n.AutoModelForCausalLM,b=n.AutoModelForDepthEstimation,x=n.AutoModelForDocumentQuestionAnswering,y=n.AutoModelForImageClassification,F=n.AutoModelForImageFeatureExtraction,C=n.AutoModelForImageMatting,P=n.AutoModelForImageSegmentation,v=n.AutoModelForImageToImage,S=n.AutoModelForMaskGeneration,A=n.AutoModelForMaskedLM,L=n.AutoModelForObjectDetection,E=n.AutoModelForQuestionAnswering,z=n.AutoModelForSemanticSegmentation,B=n.AutoModelForSeq2SeqLM,I=n.AutoModelForSequenceClassification,O=n.AutoModelForSpeechSeq2Seq,D=n.AutoModelForTextToSpectrogram,N=n.AutoModelForTextToWaveform,V=n.AutoModelForTokenClassification,q=n.AutoModelForVision2Seq,j=n.AutoModelForXVector,R=n.AutoModelForZeroShotObjectDetection,G=n.AutoProcessor,W=n.AutoTokenizer,$=n.AutomaticSpeechRecognitionPipeline,U=n.BartForConditionalGeneration,X=n.BartForSequenceClassification,Q=n.BartModel,H=n.BartPretrainedModel,Y=n.BartTokenizer,J=n.BaseModelO
utput,Z=n.BeitFeatureExtractor,K=n.BeitForImageClassification,ee=n.BeitModel,te=n.BeitPreTrainedModel,se=n.BertForMaskedLM,oe=n.BertForQuestionAnswering,ne=n.BertForSequenceClassification,re=n.BertForTokenClassification,ae=n.BertModel,ie=n.BertPreTrainedModel,le=n.BertTokenizer,ce=n.BitImageProcessor,de=n.BlenderbotForConditionalGeneration,ue=n.BlenderbotModel,he=n.BlenderbotPreTrainedModel,pe=n.BlenderbotSmallForConditionalGeneration,_e=n.BlenderbotSmallModel,me=n.BlenderbotSmallPreTrainedModel,fe=n.BlenderbotSmallTokenizer,ge=n.BlenderbotTokenizer,Me=n.BloomForCausalLM,we=n.BloomModel,Te=n.BloomPreTrainedModel,ke=n.BloomTokenizer,be=n.CLIPFeatureExtractor,xe=n.CLIPModel,ye=n.CLIPPreTrainedModel,Fe=n.CLIPSegForImageSegmentation,Ce=n.CLIPSegModel,Pe=n.CLIPSegPreTrainedModel,ve=n.CLIPTextModelWithProjection,Se=n.CLIPTokenizer,Ae=n.CLIPVisionModelWithProjection,Le=n.CamembertForMaskedLM,Ee=n.CamembertForQuestionAnswering,ze=n.CamembertForSequenceClassification,Be=n.CamembertForTokenClassification,Ie=n.CamembertModel,Oe=n.CamembertPreTrainedModel,De=n.CamembertTokenizer,Ne=n.CausalLMOutput,Ve=n.CausalLMOutputWithPast,qe=n.ChineseCLIPFeatureExtractor,je=n.ChineseCLIPModel,Re=n.ChineseCLIPPreTrainedModel,Ge=n.ClapAudioModelWithProjection,We=n.ClapFeatureExtractor,$e=n.ClapModel,Ue=n.ClapPreTrainedModel,Xe=n.ClapTextModelWithProjection,Qe=n.CodeGenForCausalLM,He=n.CodeGenModel,Ye=n.CodeGenPreTrainedModel,Je=n.CodeGenTokenizer,Ze=n.CodeLlamaTokenizer,Ke=n.CohereTokenizer,et=n.ConvBertForMaskedLM,tt=n.ConvBertForQuestionAnswering,st=n.ConvBertForSequenceClassification,ot=n.ConvBertForTokenClassification,nt=n.ConvBertModel,rt=n.ConvBertPreTrainedModel,at=n.ConvBertTokenizer,it=n.ConvNextFeatureExtractor,lt=n.ConvNextForImageClassification,ct=n.ConvNextImageProcessor,dt=n.ConvNextModel,ut=n.ConvNextPreTrainedModel,ht=n.ConvNextV2ForImageClassification,pt=n.ConvNextV2Model,_t=n.ConvNextV2PreTrainedModel,mt=n.DPTFeatureExtractor,ft=n.DPTForDepthEstimation,gt=n.DPTImageProcessor
,Mt=n.DPTModel,wt=n.DPTPreTrainedModel,Tt=n.DebertaForMaskedLM,kt=n.DebertaForQuestionAnswering,bt=n.DebertaForSequenceClassification,xt=n.DebertaForTokenClassification,yt=n.DebertaModel,Ft=n.DebertaPreTrainedModel,Ct=n.DebertaTokenizer,Pt=n.DebertaV2ForMaskedLM,vt=n.DebertaV2ForQuestionAnswering,St=n.DebertaV2ForSequenceClassification,At=n.DebertaV2ForTokenClassification,Lt=n.DebertaV2Model,Et=n.DebertaV2PreTrainedModel,zt=n.DebertaV2Tokenizer,Bt=n.DeiTFeatureExtractor,It=n.DeiTForImageClassification,Ot=n.DeiTModel,Dt=n.DeiTPreTrainedModel,Nt=n.DepthAnythingForDepthEstimation,Vt=n.DepthAnythingPreTrainedModel,qt=n.DepthEstimationPipeline,jt=n.DetrFeatureExtractor,Rt=n.DetrForObjectDetection,Gt=n.DetrForSegmentation,Wt=n.DetrModel,$t=n.DetrObjectDetectionOutput,Ut=n.DetrPreTrainedModel,Xt=n.DetrSegmentationOutput,Qt=n.Dinov2ForImageClassification,Ht=n.Dinov2Model,Yt=n.Dinov2PreTrainedModel,Jt=n.DistilBertForMaskedLM,Zt=n.DistilBertForQuestionAnswering,Kt=n.DistilBertForSequenceClassification,es=n.DistilBertForTokenClassification,ts=n.DistilBertModel,ss=n.DistilBertPreTrainedModel,os=n.DistilBertTokenizer,ns=n.DocumentQuestionAnsweringPipeline,rs=n.DonutFeatureExtractor,as=n.DonutSwinModel,is=n.DonutSwinPreTrainedModel,ls=n.EfficientNetForImageClassification,cs=n.EfficientNetImageProcessor,ds=n.EfficientNetModel,us=n.EfficientNetPreTrainedModel,hs=n.ElectraForMaskedLM,ps=n.ElectraForQuestionAnswering,_s=n.ElectraForSequenceClassification,ms=n.ElectraForTokenClassification,fs=n.ElectraModel,gs=n.ElectraPreTrainedModel,Ms=n.ElectraTokenizer,ws=n.EsmForMaskedLM,Ts=n.EsmForSequenceClassification,ks=n.EsmForTokenClassification,bs=n.EsmModel,xs=n.EsmPreTrainedModel,ys=n.EsmTokenizer,Fs=n.FFT,Cs=n.FalconForCausalLM,Ps=n.FalconModel,vs=n.FalconPreTrainedModel,Ss=n.FalconTokenizer,As=n.FeatureExtractionPipeline,Ls=n.FeatureExtractor,Es=n.FillMaskPipeline,zs=n.GLPNFeatureExtractor,Bs=n.GLPNForDepthEstimation,Is=n.GLPNModel,Os=n.GLPNPreTrainedModel,Ds=n.GPT2LMHeadModel,Ns=n.GPT
2Model,Vs=n.GPT2PreTrainedModel,qs=n.GPT2Tokenizer,js=n.GPTBigCodeForCausalLM,Rs=n.GPTBigCodeModel,Gs=n.GPTBigCodePreTrainedModel,Ws=n.GPTJForCausalLM,$s=n.GPTJModel,Us=n.GPTJPreTrainedModel,Xs=n.GPTNeoForCausalLM,Qs=n.GPTNeoModel,Hs=n.GPTNeoPreTrainedModel,Ys=n.GPTNeoXForCausalLM,Js=n.GPTNeoXModel,Zs=n.GPTNeoXPreTrainedModel,Ks=n.GPTNeoXTokenizer,eo=n.GemmaTokenizer,to=n.Grok1Tokenizer,so=n.HerbertTokenizer,oo=n.HubertForCTC,no=n.HubertForSequenceClassification,ro=n.HubertModel,ao=n.HubertPreTrainedModel,io=n.ImageClassificationPipeline,lo=n.ImageFeatureExtractionPipeline,co=n.ImageFeatureExtractor,uo=n.ImageMattingOutput,ho=n.ImageSegmentationPipeline,po=n.ImageToImagePipeline,_o=n.ImageToTextPipeline,mo=n.LlamaForCausalLM,fo=n.LlamaModel,go=n.LlamaPreTrainedModel,Mo=n.LlamaTokenizer,wo=n.LongT5ForConditionalGeneration,To=n.LongT5Model,ko=n.LongT5PreTrainedModel,bo=n.M2M100ForConditionalGeneration,xo=n.M2M100Model,yo=n.M2M100PreTrainedModel,Fo=n.M2M100Tokenizer,Co=n.MBart50Tokenizer,Po=n.MBartForCausalLM,vo=n.MBartForConditionalGeneration,So=n.MBartForSequenceClassification,Ao=n.MBartModel,Lo=n.MBartPreTrainedModel,Eo=n.MBartTokenizer,zo=n.MPNetForMaskedLM,Bo=n.MPNetForQuestionAnswering,Io=n.MPNetForSequenceClassification,Oo=n.MPNetForTokenClassification,Do=n.MPNetModel,No=n.MPNetPreTrainedModel,Vo=n.MPNetTokenizer,qo=n.MT5ForConditionalGeneration,jo=n.MT5Model,Ro=n.MT5PreTrainedModel,Go=n.MarianMTModel,Wo=n.MarianModel,$o=n.MarianPreTrainedModel,Uo=n.MarianTokenizer,Xo=n.MaskedLMOutput,Qo=n.MistralForCausalLM,Ho=n.MistralModel,Yo=n.MistralPreTrainedModel,Jo=n.MobileBertForMaskedLM,Zo=n.MobileBertForQuestionAnswering,Ko=n.MobileBertForSequenceClassification,en=n.MobileBertModel,tn=n.MobileBertPreTrainedModel,sn=n.MobileBertTokenizer,on=n.MobileViTFeatureExtractor,nn=n.MobileViTForImageClassification,rn=n.MobileViTModel,an=n.MobileViTPreTrainedModel,ln=n.ModelOutput,cn=n.MptForCausalLM,dn=n.MptModel,un=n.MptPreTrainedModel,hn=n.NllbTokenizer,pn=n.NomicBertModel,_n=
n.NomicBertPreTrainedModel,mn=n.NougatImageProcessor,fn=n.NougatTokenizer,gn=n.OPTForCausalLM,Mn=n.OPTModel,wn=n.OPTPreTrainedModel,Tn=n.ObjectDetectionPipeline,kn=n.OwlViTFeatureExtractor,bn=n.OwlViTForObjectDetection,xn=n.OwlViTModel,yn=n.OwlViTPreTrainedModel,Fn=n.OwlViTProcessor,Cn=n.Owlv2ForObjectDetection,Pn=n.Owlv2ImageProcessor,vn=n.Owlv2Model,Sn=n.Owlv2PreTrainedModel,An=n.PhiForCausalLM,Ln=n.PhiModel,En=n.PhiPreTrainedModel,zn=n.Pipeline,Bn=n.PreTrainedModel,In=n.PreTrainedTokenizer,On=n.PretrainedConfig,Dn=n.PretrainedMixin,Nn=n.Processor,Vn=n.QuestionAnsweringModelOutput,qn=n.QuestionAnsweringPipeline,jn=n.Qwen2ForCausalLM,Rn=n.Qwen2Model,Gn=n.Qwen2PreTrainedModel,Wn=n.Qwen2Tokenizer,$n=n.RawImage,Un=n.ResNetForImageClassification,Xn=n.ResNetModel,Qn=n.ResNetPreTrainedModel,Hn=n.RoFormerForMaskedLM,Yn=n.RoFormerForQuestionAnswering,Jn=n.RoFormerForSequenceClassification,Zn=n.RoFormerForTokenClassification,Kn=n.RoFormerModel,er=n.RoFormerPreTrainedModel,tr=n.RoFormerTokenizer,sr=n.RobertaForMaskedLM,or=n.RobertaForQuestionAnswering,nr=n.RobertaForSequenceClassification,rr=n.RobertaForTokenClassification,ar=n.RobertaModel,ir=n.RobertaPreTrainedModel,lr=n.RobertaTokenizer,cr=n.SamImageProcessor,dr=n.SamImageSegmentationOutput,ur=n.SamModel,hr=n.SamPreTrainedModel,pr=n.SamProcessor,_r=n.SeamlessM4TFeatureExtractor,mr=n.SegformerFeatureExtractor,fr=n.SegformerForImageClassification,gr=n.SegformerForSemanticSegmentation,Mr=n.SegformerModel,wr=n.SegformerPreTrainedModel,Tr=n.Seq2SeqLMOutput,kr=n.SequenceClassifierOutput,br=n.SiglipImageProcessor,xr=n.SiglipModel,yr=n.SiglipPreTrainedModel,Fr=n.SiglipTextModel,Cr=n.SiglipTokenizer,Pr=n.SiglipVisionModel,vr=n.SpeechT5FeatureExtractor,Sr=n.SpeechT5ForSpeechToText,Ar=n.SpeechT5ForTextToSpeech,Lr=n.SpeechT5HifiGan,Er=n.SpeechT5Model,zr=n.SpeechT5PreTrainedModel,Br=n.SpeechT5Processor,Ir=n.SpeechT5Tokenizer,Or=n.SqueezeBertForMaskedLM,Dr=n.SqueezeBertForQuestionAnswering,Nr=n.SqueezeBertForSequenceClassification,Vr=n
.SqueezeBertModel,qr=n.SqueezeBertPreTrainedModel,jr=n.SqueezeBertTokenizer,Rr=n.StableLmForCausalLM,Gr=n.StableLmModel,Wr=n.StableLmPreTrainedModel,$r=n.Starcoder2ForCausalLM,Ur=n.Starcoder2Model,Xr=n.Starcoder2PreTrainedModel,Qr=n.SummarizationPipeline,Hr=n.Swin2SRForImageSuperResolution,Yr=n.Swin2SRImageProcessor,Jr=n.Swin2SRModel,Zr=n.Swin2SRPreTrainedModel,Kr=n.SwinForImageClassification,ea=n.SwinModel,ta=n.SwinPreTrainedModel,sa=n.T5ForConditionalGeneration,oa=n.T5Model,na=n.T5PreTrainedModel,ra=n.T5Tokenizer,aa=n.TableTransformerForObjectDetection,ia=n.TableTransformerModel,la=n.TableTransformerObjectDetectionOutput,ca=n.TableTransformerPreTrainedModel,da=n.Tensor,ua=n.Text2TextGenerationPipeline,ha=n.TextClassificationPipeline,pa=n.TextGenerationPipeline,_a=n.TextToAudioPipeline,ma=n.TokenClassificationPipeline,fa=n.TokenClassifierOutput,ga=n.TokenizerModel,Ma=n.TrOCRForCausalLM,wa=n.TrOCRPreTrainedModel,Ta=n.TranslationPipeline,ka=n.UniSpeechForCTC,ba=n.UniSpeechForSequenceClassification,xa=n.UniSpeechModel,ya=n.UniSpeechPreTrainedModel,Fa=n.UniSpeechSatForAudioFrameClassification,Ca=n.UniSpeechSatForCTC,Pa=n.UniSpeechSatForSequenceClassification,va=n.UniSpeechSatModel,Sa=n.UniSpeechSatPreTrainedModel,Aa=n.ViTFeatureExtractor,La=n.ViTForImageClassification,Ea=n.ViTImageProcessor,za=n.ViTModel,Ba=n.ViTPreTrainedModel,Ia=n.VisionEncoderDecoderModel,Oa=n.VitMatteForImageMatting,Da=n.VitMatteImageProcessor,Na=n.VitMattePreTrainedModel,Va=n.VitsModel,qa=n.VitsModelOutput,ja=n.VitsPreTrainedModel,Ra=n.VitsTokenizer,Ga=n.Wav2Vec2BertForCTC,Wa=n.Wav2Vec2BertForSequenceClassification,$a=n.Wav2Vec2BertModel,Ua=n.Wav2Vec2BertPreTrainedModel,Xa=n.Wav2Vec2CTCTokenizer,Qa=n.Wav2Vec2FeatureExtractor,Ha=n.Wav2Vec2ForAudioFrameClassification,Ya=n.Wav2Vec2ForCTC,Ja=n.Wav2Vec2ForSequenceClassification,Za=n.Wav2Vec2Model,Ka=n.Wav2Vec2PreTrainedModel,ei=n.Wav2Vec2ProcessorWithLM,ti=n.WavLMForAudioFrameClassification,si=n.WavLMForCTC,oi=n.WavLMForSequenceClassification,ni=n.WavL
MForXVector,ri=n.WavLMModel,ai=n.WavLMPreTrainedModel,ii=n.WhisperFeatureExtractor,li=n.WhisperForConditionalGeneration,ci=n.WhisperModel,di=n.WhisperPreTrainedModel,ui=n.WhisperProcessor,hi=n.WhisperTokenizer,pi=n.XLMForQuestionAnswering,_i=n.XLMForSequenceClassification,mi=n.XLMForTokenClassification,fi=n.XLMModel,gi=n.XLMPreTrainedModel,Mi=n.XLMRobertaForMaskedLM,wi=n.XLMRobertaForQuestionAnswering,Ti=n.XLMRobertaForSequenceClassification,ki=n.XLMRobertaForTokenClassification,bi=n.XLMRobertaModel,xi=n.XLMRobertaPreTrainedModel,yi=n.XLMRobertaTokenizer,Fi=n.XLMTokenizer,Ci=n.XLMWithLMHeadModel,Pi=n.XVectorOutput,vi=n.YolosFeatureExtractor,Si=n.YolosForObjectDetection,Ai=n.YolosModel,Li=n.YolosObjectDetectionOutput,Ei=n.YolosPreTrainedModel,zi=n.ZeroShotAudioClassificationPipeline,Bi=n.ZeroShotClassificationPipeline,Ii=n.ZeroShotImageClassificationPipeline,Oi=n.ZeroShotObjectDetectionPipeline,Di=n.bankers_round,Ni=n.cat,Vi=n.cos_sim,qi=n.dot,ji=n.dynamicTimeWarping,Ri=n.env,Gi=n.getTopItems,Wi=n.hanning,$i=n.interpolate,Ui=n.interpolate_data,Xi=n.layer_norm,Qi=n.log_softmax,Hi=n.magnitude,Yi=n.max,Ji=n.mean,Zi=n.mean_pooling,Ki=n.medianFilter,el=n.mel_filter_bank,tl=n.min,sl=n.ones,ol=n.ones_like,nl=n.permute,rl=n.permute_data,al=n.pipeline,il=n.read_audio,ll=n.round,cl=n.softmax,dl=n.spectrogram,ul=n.stack,hl=n.std_mean,pl=n.window_function;export{r as ASTFeatureExtractor,a as ASTForAudioClassification,i as ASTModel,l as ASTPreTrainedModel,c as AlbertForMaskedLM,d as AlbertForQuestionAnswering,u as AlbertForSequenceClassification,h as AlbertModel,p as AlbertPreTrainedModel,_ as AlbertTokenizer,m as AudioClassificationPipeline,f as AutoConfig,g as AutoModel,M as AutoModelForAudioClassification,w as AutoModelForAudioFrameClassification,T as AutoModelForCTC,k as AutoModelForCausalLM,b as AutoModelForDepthEstimation,x as AutoModelForDocumentQuestionAnswering,y as AutoModelForImageClassification,F as AutoModelForImageFeatureExtraction,C as AutoModelForImageMatting,P 
as AutoModelForImageSegmentation,v as AutoModelForImageToImage,S as AutoModelForMaskGeneration,A as AutoModelForMaskedLM,L as AutoModelForObjectDetection,E as AutoModelForQuestionAnswering,z as AutoModelForSemanticSegmentation,B as AutoModelForSeq2SeqLM,I as AutoModelForSequenceClassification,O as AutoModelForSpeechSeq2Seq,D as AutoModelForTextToSpectrogram,N as AutoModelForTextToWaveform,V as AutoModelForTokenClassification,q as AutoModelForVision2Seq,j as AutoModelForXVector,R as AutoModelForZeroShotObjectDetection,G as AutoProcessor,W as AutoTokenizer,$ as AutomaticSpeechRecognitionPipeline,U as BartForConditionalGeneration,X as BartForSequenceClassification,Q as BartModel,H as BartPretrainedModel,Y as BartTokenizer,J as BaseModelOutput,Z as BeitFeatureExtractor,K as BeitForImageClassification,ee as BeitModel,te as BeitPreTrainedModel,se as BertForMaskedLM,oe as BertForQuestionAnswering,ne as BertForSequenceClassification,re as BertForTokenClassification,ae as BertModel,ie as BertPreTrainedModel,le as BertTokenizer,ce as BitImageProcessor,de as BlenderbotForConditionalGeneration,ue as BlenderbotModel,he as BlenderbotPreTrainedModel,pe as BlenderbotSmallForConditionalGeneration,_e as BlenderbotSmallModel,me as BlenderbotSmallPreTrainedModel,fe as BlenderbotSmallTokenizer,ge as BlenderbotTokenizer,Me as BloomForCausalLM,we as BloomModel,Te as BloomPreTrainedModel,ke as BloomTokenizer,be as CLIPFeatureExtractor,xe as CLIPModel,ye as CLIPPreTrainedModel,Fe as CLIPSegForImageSegmentation,Ce as CLIPSegModel,Pe as CLIPSegPreTrainedModel,ve as CLIPTextModelWithProjection,Se as CLIPTokenizer,Ae as CLIPVisionModelWithProjection,Le as CamembertForMaskedLM,Ee as CamembertForQuestionAnswering,ze as CamembertForSequenceClassification,Be as CamembertForTokenClassification,Ie as CamembertModel,Oe as CamembertPreTrainedModel,De as CamembertTokenizer,Ne as CausalLMOutput,Ve as CausalLMOutputWithPast,qe as ChineseCLIPFeatureExtractor,je as ChineseCLIPModel,Re as 
ChineseCLIPPreTrainedModel,Ge as ClapAudioModelWithProjection,We as ClapFeatureExtractor,$e as ClapModel,Ue as ClapPreTrainedModel,Xe as ClapTextModelWithProjection,Qe as CodeGenForCausalLM,He as CodeGenModel,Ye as CodeGenPreTrainedModel,Je as CodeGenTokenizer,Ze as CodeLlamaTokenizer,Ke as CohereTokenizer,et as ConvBertForMaskedLM,tt as ConvBertForQuestionAnswering,st as ConvBertForSequenceClassification,ot as ConvBertForTokenClassification,nt as ConvBertModel,rt as ConvBertPreTrainedModel,at as ConvBertTokenizer,it as ConvNextFeatureExtractor,lt as ConvNextForImageClassification,ct as ConvNextImageProcessor,dt as ConvNextModel,ut as ConvNextPreTrainedModel,ht as ConvNextV2ForImageClassification,pt as ConvNextV2Model,_t as ConvNextV2PreTrainedModel,mt as DPTFeatureExtractor,ft as DPTForDepthEstimation,gt as DPTImageProcessor,Mt as DPTModel,wt as DPTPreTrainedModel,Tt as DebertaForMaskedLM,kt as DebertaForQuestionAnswering,bt as DebertaForSequenceClassification,xt as DebertaForTokenClassification,yt as DebertaModel,Ft as DebertaPreTrainedModel,Ct as DebertaTokenizer,Pt as DebertaV2ForMaskedLM,vt as DebertaV2ForQuestionAnswering,St as DebertaV2ForSequenceClassification,At as DebertaV2ForTokenClassification,Lt as DebertaV2Model,Et as DebertaV2PreTrainedModel,zt as DebertaV2Tokenizer,Bt as DeiTFeatureExtractor,It as DeiTForImageClassification,Ot as DeiTModel,Dt as DeiTPreTrainedModel,Nt as DepthAnythingForDepthEstimation,Vt as DepthAnythingPreTrainedModel,qt as DepthEstimationPipeline,jt as DetrFeatureExtractor,Rt as DetrForObjectDetection,Gt as DetrForSegmentation,Wt as DetrModel,$t as DetrObjectDetectionOutput,Ut as DetrPreTrainedModel,Xt as DetrSegmentationOutput,Qt as Dinov2ForImageClassification,Ht as Dinov2Model,Yt as Dinov2PreTrainedModel,Jt as DistilBertForMaskedLM,Zt as DistilBertForQuestionAnswering,Kt as DistilBertForSequenceClassification,es as DistilBertForTokenClassification,ts as DistilBertModel,ss as DistilBertPreTrainedModel,os as 
DistilBertTokenizer,ns as DocumentQuestionAnsweringPipeline,rs as DonutFeatureExtractor,as as DonutSwinModel,is as DonutSwinPreTrainedModel,ls as EfficientNetForImageClassification,cs as EfficientNetImageProcessor,ds as EfficientNetModel,us as EfficientNetPreTrainedModel,hs as ElectraForMaskedLM,ps as ElectraForQuestionAnswering,_s as ElectraForSequenceClassification,ms as ElectraForTokenClassification,fs as ElectraModel,gs as ElectraPreTrainedModel,Ms as ElectraTokenizer,ws as EsmForMaskedLM,Ts as EsmForSequenceClassification,ks as EsmForTokenClassification,bs as EsmModel,xs as EsmPreTrainedModel,ys as EsmTokenizer,Fs as FFT,Cs as FalconForCausalLM,Ps as FalconModel,vs as FalconPreTrainedModel,Ss as FalconTokenizer,As as FeatureExtractionPipeline,Ls as FeatureExtractor,Es as FillMaskPipeline,zs as GLPNFeatureExtractor,Bs as GLPNForDepthEstimation,Is as GLPNModel,Os as GLPNPreTrainedModel,Ds as GPT2LMHeadModel,Ns as GPT2Model,Vs as GPT2PreTrainedModel,qs as GPT2Tokenizer,js as GPTBigCodeForCausalLM,Rs as GPTBigCodeModel,Gs as GPTBigCodePreTrainedModel,Ws as GPTJForCausalLM,$s as GPTJModel,Us as GPTJPreTrainedModel,Xs as GPTNeoForCausalLM,Qs as GPTNeoModel,Hs as GPTNeoPreTrainedModel,Ys as GPTNeoXForCausalLM,Js as GPTNeoXModel,Zs as GPTNeoXPreTrainedModel,Ks as GPTNeoXTokenizer,eo as GemmaTokenizer,to as Grok1Tokenizer,so as HerbertTokenizer,oo as HubertForCTC,no as HubertForSequenceClassification,ro as HubertModel,ao as HubertPreTrainedModel,io as ImageClassificationPipeline,lo as ImageFeatureExtractionPipeline,co as ImageFeatureExtractor,uo as ImageMattingOutput,ho as ImageSegmentationPipeline,po as ImageToImagePipeline,_o as ImageToTextPipeline,mo as LlamaForCausalLM,fo as LlamaModel,go as LlamaPreTrainedModel,Mo as LlamaTokenizer,wo as LongT5ForConditionalGeneration,To as LongT5Model,ko as LongT5PreTrainedModel,bo as M2M100ForConditionalGeneration,xo as M2M100Model,yo as M2M100PreTrainedModel,Fo as M2M100Tokenizer,Co as MBart50Tokenizer,Po as MBartForCausalLM,vo 
as MBartForConditionalGeneration,So as MBartForSequenceClassification,Ao as MBartModel,Lo as MBartPreTrainedModel,Eo as MBartTokenizer,zo as MPNetForMaskedLM,Bo as MPNetForQuestionAnswering,Io as MPNetForSequenceClassification,Oo as MPNetForTokenClassification,Do as MPNetModel,No as MPNetPreTrainedModel,Vo as MPNetTokenizer,qo as MT5ForConditionalGeneration,jo as MT5Model,Ro as MT5PreTrainedModel,Go as MarianMTModel,Wo as MarianModel,$o as MarianPreTrainedModel,Uo as MarianTokenizer,Xo as MaskedLMOutput,Qo as MistralForCausalLM,Ho as MistralModel,Yo as MistralPreTrainedModel,Jo as MobileBertForMaskedLM,Zo as MobileBertForQuestionAnswering,Ko as MobileBertForSequenceClassification,en as MobileBertModel,tn as MobileBertPreTrainedModel,sn as MobileBertTokenizer,on as MobileViTFeatureExtractor,nn as MobileViTForImageClassification,rn as MobileViTModel,an as MobileViTPreTrainedModel,ln as ModelOutput,cn as MptForCausalLM,dn as MptModel,un as MptPreTrainedModel,hn as NllbTokenizer,pn as NomicBertModel,_n as NomicBertPreTrainedModel,mn as NougatImageProcessor,fn as NougatTokenizer,gn as OPTForCausalLM,Mn as OPTModel,wn as OPTPreTrainedModel,Tn as ObjectDetectionPipeline,kn as OwlViTFeatureExtractor,bn as OwlViTForObjectDetection,xn as OwlViTModel,yn as OwlViTPreTrainedModel,Fn as OwlViTProcessor,Cn as Owlv2ForObjectDetection,Pn as Owlv2ImageProcessor,vn as Owlv2Model,Sn as Owlv2PreTrainedModel,An as PhiForCausalLM,Ln as PhiModel,En as PhiPreTrainedModel,zn as Pipeline,Bn as PreTrainedModel,In as PreTrainedTokenizer,On as PretrainedConfig,Dn as PretrainedMixin,Nn as Processor,Vn as QuestionAnsweringModelOutput,qn as QuestionAnsweringPipeline,jn as Qwen2ForCausalLM,Rn as Qwen2Model,Gn as Qwen2PreTrainedModel,Wn as Qwen2Tokenizer,$n as RawImage,Un as ResNetForImageClassification,Xn as ResNetModel,Qn as ResNetPreTrainedModel,Hn as RoFormerForMaskedLM,Yn as RoFormerForQuestionAnswering,Jn as RoFormerForSequenceClassification,Zn as RoFormerForTokenClassification,Kn as 
RoFormerModel,er as RoFormerPreTrainedModel,tr as RoFormerTokenizer,sr as RobertaForMaskedLM,or as RobertaForQuestionAnswering,nr as RobertaForSequenceClassification,rr as RobertaForTokenClassification,ar as RobertaModel,ir as RobertaPreTrainedModel,lr as RobertaTokenizer,cr as SamImageProcessor,dr as SamImageSegmentationOutput,ur as SamModel,hr as SamPreTrainedModel,pr as SamProcessor,_r as SeamlessM4TFeatureExtractor,mr as SegformerFeatureExtractor,fr as SegformerForImageClassification,gr as SegformerForSemanticSegmentation,Mr as SegformerModel,wr as SegformerPreTrainedModel,Tr as Seq2SeqLMOutput,kr as SequenceClassifierOutput,br as SiglipImageProcessor,xr as SiglipModel,yr as SiglipPreTrainedModel,Fr as SiglipTextModel,Cr as SiglipTokenizer,Pr as SiglipVisionModel,vr as SpeechT5FeatureExtractor,Sr as SpeechT5ForSpeechToText,Ar as SpeechT5ForTextToSpeech,Lr as SpeechT5HifiGan,Er as SpeechT5Model,zr as SpeechT5PreTrainedModel,Br as SpeechT5Processor,Ir as SpeechT5Tokenizer,Or as SqueezeBertForMaskedLM,Dr as SqueezeBertForQuestionAnswering,Nr as SqueezeBertForSequenceClassification,Vr as SqueezeBertModel,qr as SqueezeBertPreTrainedModel,jr as SqueezeBertTokenizer,Rr as StableLmForCausalLM,Gr as StableLmModel,Wr as StableLmPreTrainedModel,$r as Starcoder2ForCausalLM,Ur as Starcoder2Model,Xr as Starcoder2PreTrainedModel,Qr as SummarizationPipeline,Hr as Swin2SRForImageSuperResolution,Yr as Swin2SRImageProcessor,Jr as Swin2SRModel,Zr as Swin2SRPreTrainedModel,Kr as SwinForImageClassification,ea as SwinModel,ta as SwinPreTrainedModel,sa as T5ForConditionalGeneration,oa as T5Model,na as T5PreTrainedModel,ra as T5Tokenizer,aa as TableTransformerForObjectDetection,ia as TableTransformerModel,la as TableTransformerObjectDetectionOutput,ca as TableTransformerPreTrainedModel,da as Tensor,ua as Text2TextGenerationPipeline,ha as TextClassificationPipeline,pa as TextGenerationPipeline,_a as TextToAudioPipeline,ma as TokenClassificationPipeline,fa as TokenClassifierOutput,ga as 
TokenizerModel,Ma as TrOCRForCausalLM,wa as TrOCRPreTrainedModel,Ta as TranslationPipeline,ka as UniSpeechForCTC,ba as UniSpeechForSequenceClassification,xa as UniSpeechModel,ya as UniSpeechPreTrainedModel,Fa as UniSpeechSatForAudioFrameClassification,Ca as UniSpeechSatForCTC,Pa as UniSpeechSatForSequenceClassification,va as UniSpeechSatModel,Sa as UniSpeechSatPreTrainedModel,Aa as ViTFeatureExtractor,La as ViTForImageClassification,Ea as ViTImageProcessor,za as ViTModel,Ba as ViTPreTrainedModel,Ia as VisionEncoderDecoderModel,Oa as VitMatteForImageMatting,Da as VitMatteImageProcessor,Na as VitMattePreTrainedModel,Va as VitsModel,qa as VitsModelOutput,ja as VitsPreTrainedModel,Ra as VitsTokenizer,Ga as Wav2Vec2BertForCTC,Wa as Wav2Vec2BertForSequenceClassification,$a as Wav2Vec2BertModel,Ua as Wav2Vec2BertPreTrainedModel,Xa as Wav2Vec2CTCTokenizer,Qa as Wav2Vec2FeatureExtractor,Ha as Wav2Vec2ForAudioFrameClassification,Ya as Wav2Vec2ForCTC,Ja as Wav2Vec2ForSequenceClassification,Za as Wav2Vec2Model,Ka as Wav2Vec2PreTrainedModel,ei as Wav2Vec2ProcessorWithLM,ti as WavLMForAudioFrameClassification,si as WavLMForCTC,oi as WavLMForSequenceClassification,ni as WavLMForXVector,ri as WavLMModel,ai as WavLMPreTrainedModel,ii as WhisperFeatureExtractor,li as WhisperForConditionalGeneration,ci as WhisperModel,di as WhisperPreTrainedModel,ui as WhisperProcessor,hi as WhisperTokenizer,pi as XLMForQuestionAnswering,_i as XLMForSequenceClassification,mi as XLMForTokenClassification,fi as XLMModel,gi as XLMPreTrainedModel,Mi as XLMRobertaForMaskedLM,wi as XLMRobertaForQuestionAnswering,Ti as XLMRobertaForSequenceClassification,ki as XLMRobertaForTokenClassification,bi as XLMRobertaModel,xi as XLMRobertaPreTrainedModel,yi as XLMRobertaTokenizer,Fi as XLMTokenizer,Ci as XLMWithLMHeadModel,Pi as XVectorOutput,vi as YolosFeatureExtractor,Si as YolosForObjectDetection,Ai as YolosModel,Li as YolosObjectDetectionOutput,Ei as YolosPreTrainedModel,zi as 
ZeroShotAudioClassificationPipeline,Bi as ZeroShotClassificationPipeline,Ii as ZeroShotImageClassificationPipeline,Oi as ZeroShotObjectDetectionPipeline,Di as bankers_round,Ni as cat,Vi as cos_sim,qi as dot,ji as dynamicTimeWarping,Ri as env,Gi as getTopItems,Wi as hanning,$i as interpolate,Ui as interpolate_data,Xi as layer_norm,Qi as log_softmax,Hi as magnitude,Yi as max,Ji as mean,Zi as mean_pooling,Ki as medianFilter,el as mel_filter_bank,tl as min,sl as ones,ol as ones_like,nl as permute,rl as permute_data,al as pipeline,il as read_audio,ll as round,cl as softmax,dl as spectrogram,ul as stack,hl as std_mean,pl as window_function};
//# sourceMappingURL=transformers.min.js.map
|