summaryrefslogtreecommitdiffstats
path: root/js/src/wasm
diff options
context:
space:
mode:
Diffstat (limited to 'js/src/wasm')
-rw-r--r--js/src/wasm/GenerateBuiltinModules.py93
-rw-r--r--js/src/wasm/WasmBCClass.h8
-rw-r--r--js/src/wasm/WasmBCMemory.cpp8
-rw-r--r--js/src/wasm/WasmBaselineCompile.cpp140
-rw-r--r--js/src/wasm/WasmBinary.h47
-rw-r--r--js/src/wasm/WasmBuiltinModule.cpp109
-rw-r--r--js/src/wasm/WasmBuiltinModule.h61
-rw-r--r--js/src/wasm/WasmBuiltinModule.yaml226
-rw-r--r--js/src/wasm/WasmBuiltins.cpp6
-rw-r--r--js/src/wasm/WasmCode.cpp17
-rw-r--r--js/src/wasm/WasmCode.h5
-rw-r--r--js/src/wasm/WasmCodegenTypes.h2
-rw-r--r--js/src/wasm/WasmCompile.cpp51
-rw-r--r--js/src/wasm/WasmCompile.h13
-rw-r--r--js/src/wasm/WasmCompileArgs.h9
-rw-r--r--js/src/wasm/WasmFeatures.cpp22
-rw-r--r--js/src/wasm/WasmFrameIter.cpp13
-rw-r--r--js/src/wasm/WasmFrameIter.h8
-rw-r--r--js/src/wasm/WasmGcObject-inl.h3
-rw-r--r--js/src/wasm/WasmGenerator.cpp2
-rw-r--r--js/src/wasm/WasmInitExpr.cpp19
-rw-r--r--js/src/wasm/WasmInstance.cpp76
-rw-r--r--js/src/wasm/WasmInstance.h15
-rw-r--r--js/src/wasm/WasmIonCompile.cpp342
-rw-r--r--js/src/wasm/WasmIonCompile.h8
-rw-r--r--js/src/wasm/WasmJS.cpp78
-rw-r--r--js/src/wasm/WasmJS.h6
-rw-r--r--js/src/wasm/WasmModule.cpp2
-rw-r--r--js/src/wasm/WasmOpIter.cpp4
-rw-r--r--js/src/wasm/WasmOpIter.h51
-rw-r--r--js/src/wasm/WasmProcess.cpp13
-rw-r--r--js/src/wasm/WasmSerialize.cpp2
-rw-r--r--js/src/wasm/WasmStaticTypeDefs.cpp50
-rw-r--r--js/src/wasm/WasmStaticTypeDefs.h41
-rw-r--r--js/src/wasm/WasmTypeDef.h9
-rw-r--r--js/src/wasm/WasmValType.cpp99
-rw-r--r--js/src/wasm/WasmValType.h4
-rw-r--r--js/src/wasm/WasmValidate.cpp84
-rw-r--r--js/src/wasm/WasmValidate.h10
-rw-r--r--js/src/wasm/WasmValue.cpp2
-rw-r--r--js/src/wasm/WasmValue.h10
-rw-r--r--js/src/wasm/moz.build1
42 files changed, 1048 insertions, 721 deletions
diff --git a/js/src/wasm/GenerateBuiltinModules.py b/js/src/wasm/GenerateBuiltinModules.py
index 17270bc46e..0bd17d8821 100644
--- a/js/src/wasm/GenerateBuiltinModules.py
+++ b/js/src/wasm/GenerateBuiltinModules.py
@@ -47,6 +47,58 @@ def cppBool(v):
return "false"
+def specTypeToMIRType(specType):
+ if specType == "i32" or specType == "i64" or specType == "f32" or specType == "f64":
+ return f"ValType::{specType}().toMIRType()"
+ if (
+ specType == "externref"
+ or specType == "anyref"
+ or specType == "funcref"
+ or isinstance(specType, dict)
+ ):
+ return "MIRType::WasmAnyRef"
+ raise ValueError()
+
+
+def specHeapTypeToTypeCode(specHeapType):
+ if specHeapType == "func":
+ return "Func"
+ if specHeapType == "any":
+ return "Any"
+ if specHeapType == "extern":
+ return "Extern"
+ if specHeapType == "array":
+ return "Array"
+ if specHeapType == "struct":
+ return "Struct"
+ raise ValueError()
+
+
+def specTypeToValType(specType):
+ if specType == "i32" or specType == "i64" or specType == "f32" or specType == "f64":
+ return f"ValType::{specType}()"
+
+ if specType == "externref":
+ return "ValType(RefType::extern_())"
+
+ if specType == "anyref":
+ return "ValType(RefType::any())"
+
+ if specType == "funcref":
+ return "ValType(RefType::func())"
+
+ if isinstance(specType, dict):
+ nullable = cppBool(specType["nullable"])
+ if "type" in specType:
+ ref = specType["type"]
+ return f"ValType(RefType::fromTypeDef({ref}, {nullable}))"
+ else:
+ code = specType["code"]
+ return f"ValType(RefType::fromTypeCode(TypeCode(RefType::{specHeapTypeToTypeCode(code)}), {nullable}))"
+
+ raise ValueError()
+
+
def main(c_out, yaml_path):
data = load_yaml(yaml_path)
@@ -64,34 +116,45 @@ def main(c_out, yaml_path):
for op in data:
# Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_<op> as:
# `{ValType::I32, ValType::I32, ...}`.
+ valTypes = ", ".join(specTypeToValType(p) for p in op["params"])
contents += (
f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_{op['op']} "
- f"{{{', '.join(op['params'])}}}\n"
+ f"{{{valTypes}}}\n"
)
- # Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_<op> as:
- # `<num_types>, {_PTR, _I32, ..., _PTR, _END}`.
+ # Define DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_<op> as:
+ # `<num_types>, {MIRType::Pointer, _I32, ..., MIRType::Pointer, _END}`.
num_types = len(op["params"]) + 1
- sas_types = (
- f"{{_PTR{''.join(', ' + (p + '.toMIRType()') for p in op['params'])}"
- )
+ mir_types = "{MIRType::Pointer"
+ mir_types += "".join(", " + specTypeToMIRType(p) for p in op["params"])
if op["uses_memory"]:
- sas_types += ", _PTR"
+ mir_types += ", MIRType::Pointer"
num_types += 1
- sas_types += ", _END}"
+ # Add the end marker
+ mir_types += ", MIRType::None}"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_{op['op']} {num_types}, {sas_types}\n"
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_{op['op']} {num_types}, {mir_types}\n"
+ # Define DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_<op> as:
+ # `Some(X)` if present, or else `Nothing()`.
result_valtype = ""
- result_sastype = ""
if "result" in op:
- result_valtype = f"Some({op['result']})\n"
- result_sastype = f"{op['result']}.toMIRType()\n"
+ result_valtype = f"Some({specTypeToValType(op['result'])})\n"
else:
result_valtype = "Nothing()"
- result_sastype = "_VOID"
contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_{op['op']} {result_valtype}\n"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_SASTYPE_{op['op']} {result_sastype}\n"
- contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_{op['op']} _{op['fail_mode']}\n"
+
+ # Define DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_<op> as:
+ # `X` if present, or else `MIRType::None`.
+ result_mirtype = ""
+ if "result" in op:
+ result_mirtype = specTypeToMIRType(op["result"]) + "\n"
+ else:
+ result_mirtype = "MIRType::None"
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_{op['op']} {result_mirtype}\n"
+
+ # Define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_<op> as:
+ # `FailureMode::X`.
+ contents += f"#define DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_{op['op']} FailureMode::{op['fail_mode']}\n"
generate_header(c_out, "wasm_WasmBuiltinModuleGenerated_h", contents)
diff --git a/js/src/wasm/WasmBCClass.h b/js/src/wasm/WasmBCClass.h
index c216d0ffd5..844ae3381a 100644
--- a/js/src/wasm/WasmBCClass.h
+++ b/js/src/wasm/WasmBCClass.h
@@ -297,6 +297,10 @@ struct BaseCompiler final {
// Flag indicating that the compiler is currently in a dead code region.
bool deadCode_;
+ // Store previously finished note to know if we need to insert a nop in
+ // finishTryNote.
+ size_t mostRecentFinishedTryNoteIndex_;
+
///////////////////////////////////////////////////////////////////////////
//
// State for bounds check elimination.
@@ -973,7 +977,7 @@ struct BaseCompiler final {
bool tailCall, CodeOffset* fastCallOffset,
CodeOffset* slowCallOffset);
CodeOffset callImport(unsigned instanceDataOffset, const FunctionCall& call);
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
void callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset* fastCallOffset, CodeOffset* slowCallOffset);
# ifdef ENABLE_WASM_TAIL_CALLS
@@ -1641,7 +1645,7 @@ struct BaseCompiler final {
[[nodiscard]] bool emitRefFunc();
[[nodiscard]] bool emitRefNull();
[[nodiscard]] bool emitRefIsNull();
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool emitRefAsNonNull();
[[nodiscard]] bool emitBrOnNull();
[[nodiscard]] bool emitBrOnNonNull();
diff --git a/js/src/wasm/WasmBCMemory.cpp b/js/src/wasm/WasmBCMemory.cpp
index f4e19d95e8..b6ef67f24d 100644
--- a/js/src/wasm/WasmBCMemory.cpp
+++ b/js/src/wasm/WasmBCMemory.cpp
@@ -1212,7 +1212,7 @@ static void PopAndAllocate(BaseCompiler* bc, ValType type,
Scalar::Type viewType, AtomicOp op, RegI32* rd,
RegI32* rv, Temps* temps) {
bc->needI32(bc->specific_.eax);
- if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Add || op == AtomicOp::Sub) {
// We use xadd, so source and destination are the same. Using
// eax here is overconstraining, but for byte operations on x86
// we do need something with a byte register.
@@ -1246,7 +1246,7 @@ static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access, T srcAddr,
# else
RegI32 temp;
ScratchI32 scratch(*bc);
- if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ if (op != AtomicOp::Add && op != AtomicOp::Sub) {
temp = scratch;
}
# endif
@@ -1401,7 +1401,7 @@ namespace atomic_rmw64 {
static void PopAndAllocate(BaseCompiler* bc, AtomicOp op, RegI64* rd,
RegI64* rv, RegI64* temp) {
- if (op == AtomicFetchAddOp || op == AtomicFetchSubOp) {
+ if (op == AtomicOp::Add || op == AtomicOp::Sub) {
// We use xaddq, so input and output must be the same register.
*rv = bc->popI64();
*rd = *rv;
@@ -1422,7 +1422,7 @@ static void Perform(BaseCompiler* bc, const MemoryAccessDesc& access,
static void Deallocate(BaseCompiler* bc, AtomicOp op, RegI64 rv, RegI64 temp) {
bc->maybeFree(temp);
- if (op != AtomicFetchAddOp && op != AtomicFetchSubOp) {
+ if (op != AtomicOp::Add && op != AtomicOp::Sub) {
bc->freeI64(rv);
}
}
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index 196e49f76e..cb0fbde6ec 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -1658,7 +1658,7 @@ bool BaseCompiler::callIndirect(uint32_t funcTypeIndex, uint32_t tableIndex,
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
void BaseCompiler::callRef(const Stk& calleeRef, const FunctionCall& call,
CodeOffset* fastCallOffset,
CodeOffset* slowCallOffset) {
@@ -1788,15 +1788,25 @@ void BaseCompiler::finishTryNote(size_t tryNoteIndex) {
masm.nop();
}
- // Check the previous try note to ensure that we don't share an edge with
- // it that could lead to ambiguity. Insert a nop, if required.
- if (tryNotes.length() > 0) {
- const TryNote& previous = tryNotes.back();
+ // Check the most recent finished try note to ensure that we don't share an
+ // edge with it that could lead to ambiguity. Insert a nop, if required.
+ //
+ // Notice that finishTryNote is called in LIFO order -- using depth-first
+ // search numbering to see if we are traversing back from a nested try to a
+ // parent try, where we may need to ensure that the end offsets do not
+ // coincide.
+ //
+ // In the case the tryNodeIndex >= mostRecentFinishedTryNoteIndex_, we have
+ // finished a try that began after the most recent finished try, and so
+ // startTryNote will take care of any nops.
+ if (tryNoteIndex < mostRecentFinishedTryNoteIndex_) {
+ const TryNote& previous = tryNotes[mostRecentFinishedTryNoteIndex_];
uint32_t currentOffset = masm.currentOffset();
if (previous.tryBodyEnd() == currentOffset) {
masm.nop();
}
}
+ mostRecentFinishedTryNoteIndex_ = tryNoteIndex;
// Don't set the end of the try note if we've OOM'ed, as the above nop's may
// not have been placed. This is okay as this compilation will be thrown
@@ -3875,7 +3885,7 @@ bool BaseCompiler::emitBrIf() {
return emitBranchPerform(&b);
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitBrOnNull() {
MOZ_ASSERT(!hasLatentOp());
@@ -5286,7 +5296,7 @@ bool BaseCompiler::emitReturnCallIndirect() {
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitCallRef() {
const FuncType* funcType;
Nothing unused_callee;
@@ -6289,7 +6299,7 @@ bool BaseCompiler::emitRefIsNull() {
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
bool BaseCompiler::emitRefAsNonNull() {
Nothing nothing;
if (!iter_.readRefAsNonNull(&nothing)) {
@@ -9776,13 +9786,13 @@ bool BaseCompiler::emitCallBuiltinModuleFunc() {
return true;
}
- if (builtinModuleFunc->usesMemory) {
+ if (builtinModuleFunc->usesMemory()) {
// The final parameter of an builtinModuleFunc is implicitly the heap base
pushHeapBase(0);
}
// Call the builtinModuleFunc
- return emitInstanceCall(builtinModuleFunc->signature);
+ return emitInstanceCall(*builtinModuleFunc->sig());
}
//////////////////////////////////////////////////////////////////////////////
@@ -9989,36 +9999,18 @@ bool BaseCompiler::emitBody() {
case uint16_t(Op::Else):
CHECK_NEXT(emitElse());
case uint16_t(Op::Try):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitTry());
case uint16_t(Op::Catch):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitCatch());
case uint16_t(Op::CatchAll):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitCatchAll());
case uint16_t(Op::Delegate):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK(emitDelegate());
iter_.popDelegate();
NEXT();
case uint16_t(Op::Throw):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitThrow());
case uint16_t(Op::Rethrow):
- if (!moduleEnv_.exceptionsEnabled()) {
- return iter_.unrecognizedOpcode(&op);
- }
CHECK_NEXT(emitRethrow());
case uint16_t(Op::ThrowRef):
if (!moduleEnv_.exnrefEnabled()) {
@@ -10063,16 +10055,15 @@ bool BaseCompiler::emitBody() {
}
CHECK_NEXT(emitReturnCallIndirect());
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::CallRef):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitCallRef());
# ifdef ENABLE_WASM_TAIL_CALLS
case uint16_t(Op::ReturnCallRef):
- if (!moduleEnv_.functionReferencesEnabled() ||
- !moduleEnv_.tailCallsEnabled()) {
+ if (!moduleEnv_.gcEnabled() || !moduleEnv_.tailCallsEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitReturnCallRef());
@@ -10609,19 +10600,19 @@ bool BaseCompiler::emitBody() {
case uint16_t(Op::MemorySize):
CHECK_NEXT(emitMemorySize());
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitRefAsNonNull());
case uint16_t(Op::BrOnNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitBrOnNull());
case uint16_t(Op::BrOnNonNull):
- if (!moduleEnv_.functionReferencesEnabled()) {
+ if (!moduleEnv_.gcEnabled()) {
return iter_.unrecognizedOpcode(&op);
}
CHECK_NEXT(emitBrOnNonNull());
@@ -11484,113 +11475,113 @@ bool BaseCompiler::emitBody() {
case uint32_t(ThreadOp::I32AtomicAdd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAddOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicSub):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchSubOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicAnd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchAndOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicOr):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchOrOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicXor):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Int32, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Int64, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I32, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor8U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor16U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor32U):
CHECK_NEXT(
- emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicFetchXorOp));
+ emitAtomicRMW(ValType::I64, Scalar::Uint32, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXchg):
CHECK_NEXT(emitAtomicXchg(ValType::I32, Scalar::Int32));
@@ -11851,6 +11842,8 @@ BaseCompiler::BaseCompiler(const ModuleEnvironment& moduleEnv,
stackMapGenerator_(stackMaps, trapExitLayout, trapExitLayoutNumWords,
*masm),
deadCode_(false),
+ // Init value is selected to ensure proper logic in finishTryNote.
+ mostRecentFinishedTryNoteIndex_(0),
bceSafe_(0),
latentOp_(LatentOp::None),
latentType_(ValType::I32),
@@ -11979,10 +11972,7 @@ bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
// Build the local types vector.
ValTypeVector locals;
- if (!locals.appendAll(moduleEnv.funcs[func.index].type->args())) {
- return false;
- }
- if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
return false;
}
diff --git a/js/src/wasm/WasmBinary.h b/js/src/wasm/WasmBinary.h
index 2d41528157..da17a0a864 100644
--- a/js/src/wasm/WasmBinary.h
+++ b/js/src/wasm/WasmBinary.h
@@ -72,12 +72,18 @@ class Opcode {
static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
}
+ MOZ_IMPLICIT Opcode(GcOp op)
+ : bits_((uint32_t(op) << 8) | uint32_t(Op::GcPrefix)) {
+ static_assert(size_t(SimdOp::Limit) <= 0xFFFFFF, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(SimdOp::Limit));
+ }
bool isOp() const { return bits_ < uint32_t(Op::FirstPrefix); }
bool isMisc() const { return (bits_ & 255) == uint32_t(Op::MiscPrefix); }
bool isThread() const { return (bits_ & 255) == uint32_t(Op::ThreadPrefix); }
bool isMoz() const { return (bits_ & 255) == uint32_t(Op::MozPrefix); }
bool isSimd() const { return (bits_ & 255) == uint32_t(Op::SimdPrefix); }
+ bool isGc() const { return (bits_ & 255) == uint32_t(Op::GcPrefix); }
Op asOp() const {
MOZ_ASSERT(isOp());
@@ -99,6 +105,10 @@ class Opcode {
MOZ_ASSERT(isSimd());
return SimdOp(bits_ >> 8);
}
+ GcOp asGc() const {
+ MOZ_ASSERT(isGc());
+ return GcOp(bits_ >> 8);
+ }
uint32_t bits() const { return bits_; }
@@ -127,6 +137,7 @@ using MaybeSectionRange = Maybe<SectionRange>;
class Encoder {
Bytes& bytes_;
+ const TypeContext* types_;
template <class T>
[[nodiscard]] bool write(const T& v) {
@@ -201,7 +212,13 @@ class Encoder {
}
public:
- explicit Encoder(Bytes& bytes) : bytes_(bytes) { MOZ_ASSERT(empty()); }
+ explicit Encoder(Bytes& bytes) : bytes_(bytes), types_(nullptr) {
+ MOZ_ASSERT(empty());
+ }
+ explicit Encoder(Bytes& bytes, const TypeContext& types)
+ : bytes_(bytes), types_(&types) {
+ MOZ_ASSERT(empty());
+ }
size_t currentOffset() const { return bytes_.length(); }
bool empty() const { return currentOffset() == 0; }
@@ -226,9 +243,17 @@ class Encoder {
[[nodiscard]] bool writeVarS64(int64_t i) { return writeVarS<int64_t>(i); }
[[nodiscard]] bool writeValType(ValType type) {
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
- // writeValType is only used by asm.js, which doesn't use type
- // references
- MOZ_RELEASE_ASSERT(!type.isTypeRef(), "NYI");
+ if (type.isTypeRef()) {
+ MOZ_RELEASE_ASSERT(types_,
+ "writeValType is used, but types were not specified.");
+ if (!writeFixedU8(uint8_t(type.isNullable() ? TypeCode::NullableRef
+ : TypeCode::Ref))) {
+ return false;
+ }
+ uint32_t typeIndex = types_->indexOf(*type.typeDef());
+ // Encode positive LEB S33 as S64.
+ return writeVarS64(typeIndex);
+ }
TypeCode tc = type.packed().typeCode();
MOZ_ASSERT(size_t(tc) < size_t(TypeCode::Limit));
return writeFixedU8(uint8_t(tc));
@@ -693,9 +718,9 @@ inline bool Decoder::readPackedType(const TypeContext& types,
}
case uint8_t(TypeCode::Ref):
case uint8_t(TypeCode::NullableRef): {
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (!features.functionReferences) {
- return fail("(ref T) types not enabled");
+#ifdef ENABLE_WASM_GC
+ if (!features.gc) {
+ return fail("gc not enabled");
}
bool nullable = code == uint8_t(TypeCode::NullableRef);
RefType refType;
@@ -718,7 +743,7 @@ inline bool Decoder::readPackedType(const TypeContext& types,
case uint8_t(TypeCode::NullAnyRef): {
#ifdef ENABLE_WASM_GC
if (!features.gc) {
- return fail("gc types not enabled");
+ return fail("gc not enabled");
}
*type = RefType::fromTypeCode(TypeCode(code), true);
return true;
@@ -784,7 +809,7 @@ inline bool Decoder::readHeapType(const TypeContext& types,
case uint8_t(TypeCode::NullExternRef):
case uint8_t(TypeCode::NullAnyRef):
if (!features.gc) {
- return fail("gc types not enabled");
+ return fail("gc not enabled");
}
*type = RefType::fromTypeCode(TypeCode(code), nullable);
return true;
@@ -794,8 +819,8 @@ inline bool Decoder::readHeapType(const TypeContext& types,
}
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (features.functionReferences) {
+#ifdef ENABLE_WASM_GC
+ if (features.gc) {
int32_t x;
if (!readVarS32(&x) || x < 0 || uint32_t(x) >= types.length()) {
return fail("invalid heap type index");
diff --git a/js/src/wasm/WasmBuiltinModule.cpp b/js/src/wasm/WasmBuiltinModule.cpp
index 0748977c8b..044591224e 100644
--- a/js/src/wasm/WasmBuiltinModule.cpp
+++ b/js/src/wasm/WasmBuiltinModule.cpp
@@ -27,28 +27,29 @@
#include "wasm/WasmJS.h"
#include "wasm/WasmModule.h"
#include "wasm/WasmOpIter.h"
+#include "wasm/WasmStaticTypeDefs.h"
#include "wasm/WasmValidate.h"
using namespace js;
using namespace js::wasm;
-#define VISIT_BUILTIN_FUNC(op, export, sa_name, abitype, entry, uses_memory, \
- idx) \
- static const ValType BuiltinModuleFunc##op##_Params[] = \
- DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_##op; \
- \
- const BuiltinModuleFunc BuiltinModuleFunc##op = { \
- export, \
- mozilla::Span<const ValType>(BuiltinModuleFunc##op##_Params), \
- DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_##op, \
- SASig##sa_name, \
- uses_memory, \
- };
-
-FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
-#undef VISIT_BUILTIN_FUNC
+BuiltinModuleFuncs* BuiltinModuleFuncs::singleton_ = nullptr;
+
+[[nodiscard]] bool BuiltinModuleFunc::init(const RefPtr<TypeContext>& types,
+ mozilla::Span<const ValType> params,
+ Maybe<ValType> result,
+ bool usesMemory,
+ const SymbolicAddressSignature* sig,
+ const char* exportName) {
+ // This builtin must not have been initialized yet.
+ MOZ_ASSERT(!recGroup_);
+
+ // Initialize the basic fields
+ exportName_ = exportName;
+ sig_ = sig;
+ usesMemory_ = usesMemory;
-bool BuiltinModuleFunc::funcType(FuncType* type) const {
+ // Create a function type for the given params and result
ValTypeVector paramVec;
if (!paramVec.append(params.data(), params.data() + params.size())) {
return false;
@@ -57,21 +58,48 @@ bool BuiltinModuleFunc::funcType(FuncType* type) const {
if (result.isSome() && !resultVec.append(*result)) {
return false;
}
- *type = FuncType(std::move(paramVec), std::move(resultVec));
+ const TypeDef* typeDef =
+ types->addType(FuncType(std::move(paramVec), std::move(resultVec)));
+ if (!typeDef) {
+ return false;
+ }
+ recGroup_ = &typeDef->recGroup();
return true;
}
-/* static */
-const BuiltinModuleFunc& BuiltinModuleFunc::getFromId(BuiltinModuleFuncId id) {
- switch (id) {
-#define VISIT_BUILTIN_FUNC(op, ...) \
- case BuiltinModuleFuncId::op: \
- return BuiltinModuleFunc##op;
- FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
+bool BuiltinModuleFuncs::init() {
+ singleton_ = js_new<BuiltinModuleFuncs>();
+ if (!singleton_) {
+ return false;
+ }
+
+ RefPtr<TypeContext> types = js_new<TypeContext>();
+ if (!types) {
+ return false;
+ }
+
+#define VISIT_BUILTIN_FUNC(op, export, sa_name, abitype, entry, uses_memory, \
+ ...) \
+ const ValType op##Params[] = \
+ DECLARE_BUILTIN_MODULE_FUNC_PARAM_VALTYPES_##op; \
+ Maybe<ValType> op##Result = DECLARE_BUILTIN_MODULE_FUNC_RESULT_VALTYPE_##op; \
+ if (!singleton_->funcs_[BuiltinModuleFuncId::op].init( \
+ types, mozilla::Span<const ValType>(op##Params), op##Result, \
+ uses_memory, &SASig##sa_name, export)) { \
+ return false; \
+ }
+ FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
#undef VISIT_BUILTIN_FUNC
- default:
- MOZ_CRASH("unexpected builtinModuleFunc");
+
+ return true;
+}
+
+void BuiltinModuleFuncs::destroy() {
+ if (!singleton_) {
+ return;
}
+ js_delete(singleton_);
+ singleton_ = nullptr;
}
bool EncodeFuncBody(const BuiltinModuleFunc& builtinModuleFunc,
@@ -80,7 +108,8 @@ bool EncodeFuncBody(const BuiltinModuleFunc& builtinModuleFunc,
if (!EncodeLocalEntries(encoder, ValTypeVector())) {
return false;
}
- for (uint32_t i = 0; i < builtinModuleFunc.params.size(); i++) {
+ const FuncType* funcType = builtinModuleFunc.funcType();
+ for (uint32_t i = 0; i < funcType->args().length(); i++) {
if (!encoder.writeOp(Op::LocalGet) || !encoder.writeVarU32(i)) {
return false;
}
@@ -145,11 +174,11 @@ bool CompileBuiltinModule(JSContext* cx,
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
const BuiltinModuleFuncId& id = ids[funcIndex];
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(id);
+ BuiltinModuleFuncs::getFromId(id);
- FuncType type;
- if (!builtinModuleFunc.funcType(&type) ||
- !moduleEnv.types->addType(std::move(type))) {
+ SharedRecGroup recGroup = builtinModuleFunc.recGroup();
+ MOZ_ASSERT(recGroup->numTypes() == 1);
+ if (!moduleEnv.types->addRecGroup(recGroup)) {
ReportOutOfMemory(cx);
return false;
}
@@ -170,10 +199,10 @@ bool CompileBuiltinModule(JSContext* cx,
// Add (export "$name" (func $i)) declarations.
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(ids[funcIndex]);
+ BuiltinModuleFuncs::getFromId(ids[funcIndex]);
CacheableName exportName;
- if (!CacheableName::fromUTF8Chars(builtinModuleFunc.exportName,
+ if (!CacheableName::fromUTF8Chars(builtinModuleFunc.exportName(),
&exportName) ||
!moduleEnv.exports.append(Export(std::move(exportName), funcIndex,
DefinitionKind::Function))) {
@@ -200,7 +229,7 @@ bool CompileBuiltinModule(JSContext* cx,
for (uint32_t funcIndex = 0; funcIndex < ids.size(); funcIndex++) {
BuiltinModuleFuncId id = ids[funcIndex];
const BuiltinModuleFunc& builtinModuleFunc =
- BuiltinModuleFunc::getFromId(ids[funcIndex]);
+ BuiltinModuleFuncs::getFromId(ids[funcIndex]);
// Compilation may be done using other threads, ModuleGenerator requires
// that function bodies live until after finishFuncDefs().
@@ -267,14 +296,16 @@ static BuiltinModuleFuncId IntGemmFuncs[] = {
#ifdef ENABLE_WASM_JS_STRING_BUILTINS
static BuiltinModuleFuncId JSStringFuncs[] = {
- BuiltinModuleFuncId::StringFromWTF16Array,
- BuiltinModuleFuncId::StringToWTF16Array,
+ BuiltinModuleFuncId::StringTest,
+ BuiltinModuleFuncId::StringCast,
+ BuiltinModuleFuncId::StringFromCharCodeArray,
+ BuiltinModuleFuncId::StringIntoCharCodeArray,
BuiltinModuleFuncId::StringFromCharCode,
BuiltinModuleFuncId::StringFromCodePoint,
BuiltinModuleFuncId::StringCharCodeAt,
BuiltinModuleFuncId::StringCodePointAt,
BuiltinModuleFuncId::StringLength,
- BuiltinModuleFuncId::StringConcatenate,
+ BuiltinModuleFuncId::StringConcat,
BuiltinModuleFuncId::StringSubstring,
BuiltinModuleFuncId::StringEquals,
BuiltinModuleFuncId::StringCompare};
@@ -300,8 +331,8 @@ Maybe<const BuiltinModuleFunc*> wasm::ImportMatchesBuiltinModuleFunc(
// Not supported for implicit instantiation yet
MOZ_RELEASE_ASSERT(module == BuiltinModuleId::JSString);
for (BuiltinModuleFuncId funcId : JSStringFuncs) {
- const BuiltinModuleFunc& func = BuiltinModuleFunc::getFromId(funcId);
- if (importName == mozilla::MakeStringSpan(func.exportName)) {
+ const BuiltinModuleFunc& func = BuiltinModuleFuncs::getFromId(funcId);
+ if (importName == mozilla::MakeStringSpan(func.exportName())) {
return Some(&func);
}
}
diff --git a/js/src/wasm/WasmBuiltinModule.h b/js/src/wasm/WasmBuiltinModule.h
index 42faffec73..8646e789e6 100644
--- a/js/src/wasm/WasmBuiltinModule.h
+++ b/js/src/wasm/WasmBuiltinModule.h
@@ -62,25 +62,60 @@ struct MOZ_STACK_CLASS BuiltinModuleInstances {
// An builtin module func is a natively implemented function that may be
// compiled into a 'builtin module', which may be instantiated with a provided
// memory yielding an exported WebAssembly function wrapping the builtin module.
-struct BuiltinModuleFunc {
+class BuiltinModuleFunc {
+ private:
+ SharedRecGroup recGroup_;
+ const char* exportName_;
+ const SymbolicAddressSignature* sig_;
+ bool usesMemory_;
+
+ public:
+ // Default constructor so this can be used in an EnumeratedArray.
+ BuiltinModuleFunc() = default;
+
+ // Initialize this builtin. Must only be called once.
+ [[nodiscard]] bool init(const RefPtr<TypeContext>& types,
+ mozilla::Span<const ValType> params,
+ Maybe<ValType> result, bool usesMemory,
+ const SymbolicAddressSignature* sig,
+ const char* exportName);
+
+ // The rec group for the function type for this builtin.
+ const RecGroup* recGroup() const { return recGroup_.get(); }
+ // The type definition for the function type for this builtin.
+ const TypeDef* typeDef() const { return &recGroup_->type(0); }
+ // The function type for this builtin.
+ const FuncType* funcType() const { return &typeDef()->funcType(); }
+
// The name of the func as it is exported
- const char* exportName;
- // The params taken by the func.
- mozilla::Span<const ValType> params;
- // The optional result returned by the func.
- mozilla::Maybe<const ValType> result;
- // The signature of the builtin that implements the func
- const SymbolicAddressSignature& signature;
+ const char* exportName() const { return exportName_; }
+ // The signature of the builtin that implements this function.
+ const SymbolicAddressSignature* sig() const { return sig_; }
// Whether this function takes a pointer to the memory base as a hidden final
- // parameter.
- bool usesMemory;
+ // parameter. This parameter will show up in the SymbolicAddressSignature,
+ // but not the function type. Compilers must pass the memoryBase to the
+ // function call as the last parameter.
+ bool usesMemory() const { return usesMemory_; }
+};
+
+// Static storage for all builtin module funcs in the system.
+class BuiltinModuleFuncs {
+ using Storage =
+ mozilla::EnumeratedArray<BuiltinModuleFuncId, BuiltinModuleFunc,
+ size_t(BuiltinModuleFuncId::Limit)>;
+ Storage funcs_;
- // Allocate a FuncType for this func, returning false for OOM
- bool funcType(FuncType* type) const;
+ static BuiltinModuleFuncs* singleton_;
+
+ public:
+ [[nodiscard]] static bool init();
+ static void destroy();
// Get the BuiltinModuleFunc for an BuiltinModuleFuncId. BuiltinModuleFuncId
// must be validated.
- static const BuiltinModuleFunc& getFromId(BuiltinModuleFuncId id);
+ static const BuiltinModuleFunc& getFromId(BuiltinModuleFuncId id) {
+ return singleton_->funcs_[id];
+ }
};
Maybe<BuiltinModuleId> ImportMatchesBuiltinModule(
diff --git a/js/src/wasm/WasmBuiltinModule.yaml b/js/src/wasm/WasmBuiltinModule.yaml
index 88c2f5a575..755e0e5e74 100644
--- a/js/src/wasm/WasmBuiltinModule.yaml
+++ b/js/src/wasm/WasmBuiltinModule.yaml
@@ -12,10 +12,10 @@
entry: Instance::intrI8VecMul
export: i8vecmul
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -40,12 +40,12 @@
entry: intgemm::IntrI8PrepareB
export: int8_prepare_b
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -65,12 +65,12 @@
entry: intgemm::IntrI8PrepareBFromTransposed
export: int8_prepare_b_from_transposed
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -90,10 +90,10 @@
entry: intgemm::IntrI8PrepareBFromQuantizedTransposed
export: int8_prepare_b_from_quantized_transposed
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -116,12 +116,12 @@
entry: intgemm::IntrI8PrepareA
export: int8_prepare_a
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -142,15 +142,15 @@
entry: intgemm::IntrI8PrepareBias
export: int8_prepare_bias
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -177,18 +177,18 @@
entry: intgemm::IntrI8MultiplyAndAddBias
export: int8_multiply_and_add_bias
params:
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::f32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'f32'
+ - 'f32'
+ - 'i32'
+ - 'f32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -206,12 +206,12 @@
entry: intgemm::IntrI8SelectColumnsOfB
export: int8_select_columns_of_b
params:
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
- - 'ValType::i32()'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
+ - 'i32'
fail_mode: FailOnNegI32
uses_memory: true
@@ -219,31 +219,61 @@
#if defined(ENABLE_WASM_JS_STRING_BUILTINS)
-- op: StringFromWTF16Array
+- op: StringTest
symbolic_address:
- name: StringFromWTF16Array
+ name: StringTest
+ type: Args_Int32_GeneralGeneral
+ entry: Instance::stringTest
+ export: test
+ params:
+ - 'externref'
+ result: 'i32'
+ fail_mode: Infallible
+ uses_memory: false
+
+- op: StringCast
+ symbolic_address:
+ name: StringCast
+ type: Args_General2
+ entry: Instance::stringCast
+ export: cast
+ params:
+ - 'externref'
+ result:
+ code: 'extern'
+ nullable: false
+ fail_mode: FailOnNullPtr
+ uses_memory: false
+
+- op: StringFromCharCodeArray
+ symbolic_address:
+ name: StringFromCharCodeArray
type: Args_General_GeneralGeneralInt32Int32
- entry: Instance::stringFromWTF16Array
- export: fromWTF16Array
+ entry: Instance::stringFromCharCodeArray
+ export: fromCharCodeArray
params:
- - 'ValType(RefType::any())'
- - 'ValType::i32()'
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - type: "StaticTypeDefs::arrayMutI16"
+ nullable: true
+ - 'i32'
+ - 'i32'
+ result:
+ code: 'extern'
+ nullable: false
fail_mode: FailOnNullPtr
uses_memory: false
-- op: StringToWTF16Array
+- op: StringIntoCharCodeArray
symbolic_address:
- name: StringToWTF16Array
+ name: StringIntoCharCodeArray
type: Args_Int32_GeneralGeneralGeneralInt32
- entry: Instance::stringToWTF16Array
- export: toWTF16Array
+ entry: Instance::stringIntoCharCodeArray
+ export: intoCharCodeArray
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::any())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - type: "StaticTypeDefs::arrayMutI16"
+ nullable: true
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -254,8 +284,8 @@
entry: Instance::stringFromCharCode
export: fromCharCode
params:
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -266,8 +296,8 @@
entry: Instance::stringFromCodePoint
export: fromCodePoint
params:
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -278,9 +308,9 @@
entry: Instance::stringCharCodeAt
export: charCodeAt
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -291,9 +321,9 @@
entry: Instance::stringCodePointAt
export: codePointAt
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'i32'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -304,21 +334,21 @@
entry: Instance::stringLength
export: length
params:
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
-- op: StringConcatenate
+- op: StringConcat
symbolic_address:
- name: StringConcatenate
+ name: StringConcat
type: Args_General3
- entry: Instance::stringConcatenate
- export: concatenate
+ entry: Instance::stringConcat
+ export: concat
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType(RefType::extern_())'
+ - 'externref'
+ - 'externref'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -329,10 +359,10 @@
entry: Instance::stringSubstring
export: substring
params:
- - 'ValType(RefType::extern_())'
- - 'ValType::i32()'
- - 'ValType::i32()'
- result: 'ValType(RefType::extern_())'
+ - 'externref'
+ - 'i32'
+ - 'i32'
+ result: 'externref'
fail_mode: FailOnNullPtr
uses_memory: false
@@ -343,9 +373,9 @@
entry: Instance::stringEquals
export: equals
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnNegI32
uses_memory: false
@@ -356,9 +386,9 @@
entry: Instance::stringCompare
export: compare
params:
- - 'ValType(RefType::extern_())'
- - 'ValType(RefType::extern_())'
- result: 'ValType::i32()'
+ - 'externref'
+ - 'externref'
+ result: 'i32'
fail_mode: FailOnMaxI32
uses_memory: false
diff --git a/js/src/wasm/WasmBuiltins.cpp b/js/src/wasm/WasmBuiltins.cpp
index 08024c3dfe..7b03494bcd 100644
--- a/js/src/wasm/WasmBuiltins.cpp
+++ b/js/src/wasm/WasmBuiltins.cpp
@@ -397,9 +397,9 @@ const SymbolicAddressSignature SASigArrayCopy = {
#define VISIT_BUILTIN_FUNC(op, export, sa_name, ...) \
const SymbolicAddressSignature SASig##sa_name = { \
SymbolicAddress::sa_name, \
- DECLARE_BUILTIN_MODULE_FUNC_RESULT_SASTYPE_##op, \
+ DECLARE_BUILTIN_MODULE_FUNC_RESULT_MIRTYPE_##op, \
DECLARE_BUILTIN_MODULE_FUNC_FAILMODE_##op, \
- DECLARE_BUILTIN_MODULE_FUNC_PARAM_SASTYPES_##op};
+ DECLARE_BUILTIN_MODULE_FUNC_PARAM_MIRTYPES_##op};
FOR_EACH_BUILTIN_MODULE_FUNC(VISIT_BUILTIN_FUNC)
#undef VISIT_BUILTIN_FUNC
@@ -1839,7 +1839,7 @@ using TypedNativeToCodeRangeMap =
HashMap<TypedNative, uint32_t, TypedNative, SystemAllocPolicy>;
using SymbolicAddressToCodeRangeArray =
- EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, uint32_t>;
+ EnumeratedArray<SymbolicAddress, uint32_t, size_t(SymbolicAddress::Limit)>;
struct BuiltinThunks {
uint8_t* codeBase;
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
index b7aaa1869c..7fe2562ab6 100644
--- a/js/src/wasm/WasmCode.cpp
+++ b/js/src/wasm/WasmCode.cpp
@@ -1085,6 +1085,23 @@ bool Code::lookupTrap(void* pc, Trap* trapOut, BytecodeOffset* bytecode) const {
return false;
}
+bool Code::lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const {
+ // This logic only works if the codeRange is a function, and therefore only
+ // exists in metadata and not a lazy stub tier. Generalizing to access lazy
+ // stubs would require taking a lock, which is undesirable for the profiler.
+ MOZ_ASSERT(codeRange->isFunction());
+ for (Tier t : tiers()) {
+ const CodeTier& code = codeTier(t);
+ const MetadataTier& metadata = code.metadata();
+ if (codeRange >= metadata.codeRanges.begin() &&
+ codeRange < metadata.codeRanges.end()) {
+ *tier = t;
+ return true;
+ }
+ }
+ return false;
+}
+
struct UnwindInfoPCOffset {
const CodeRangeUnwindInfoVector& info;
explicit UnwindInfoPCOffset(const CodeRangeUnwindInfoVector& info)
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
index a34a462127..e03a2f596e 100644
--- a/js/src/wasm/WasmCode.h
+++ b/js/src/wasm/WasmCode.h
@@ -117,8 +117,8 @@ struct LinkData : LinkDataCacheablePod {
};
using InternalLinkVector = Vector<InternalLink, 0, SystemAllocPolicy>;
- struct SymbolicLinkArray
- : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
+ struct SymbolicLinkArray : EnumeratedArray<SymbolicAddress, Uint32Vector,
+ size_t(SymbolicAddress::Limit)> {
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
};
@@ -857,6 +857,7 @@ class Code : public ShareableBase<Code> {
bool containsCodePC(const void* pc) const;
bool lookupTrap(void* pc, Trap* trap, BytecodeOffset* bytecode) const;
const CodeRangeUnwindInfo* lookupUnwindInfo(void* pc) const;
+ bool lookupFunctionTier(const CodeRange* codeRange, Tier* tier) const;
// To save memory, profilingLabels_ are generated lazily when profiling mode
// is enabled.
diff --git a/js/src/wasm/WasmCodegenTypes.h b/js/src/wasm/WasmCodegenTypes.h
index 815292dd23..590572ae8a 100644
--- a/js/src/wasm/WasmCodegenTypes.h
+++ b/js/src/wasm/WasmCodegenTypes.h
@@ -273,7 +273,7 @@ WASM_DECLARE_CACHEABLE_POD(TrapSite);
WASM_DECLARE_POD_VECTOR(TrapSite, TrapSiteVector)
struct TrapSiteVectorArray
- : EnumeratedArray<Trap, Trap::Limit, TrapSiteVector> {
+ : EnumeratedArray<Trap, TrapSiteVector, size_t(Trap::Limit)> {
bool empty() const;
void clear();
void swap(TrapSiteVectorArray& rhs);
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
index 2418340684..fbf4df3e71 100644
--- a/js/src/wasm/WasmCompile.cpp
+++ b/js/src/wasm/WasmCompile.cpp
@@ -726,8 +726,8 @@ void CompilerEnvironment::computeParameters(Decoder& d) {
state_ = Computed;
}
-template <class DecoderT>
-static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
+template <class DecoderT, class ModuleGeneratorT>
+static bool DecodeFunctionBody(DecoderT& d, ModuleGeneratorT& mg,
uint32_t funcIndex) {
uint32_t bodySize;
if (!d.readVarU32(&bodySize)) {
@@ -751,9 +751,9 @@ static bool DecodeFunctionBody(DecoderT& d, ModuleGenerator& mg,
bodyBegin + bodySize);
}
-template <class DecoderT>
+template <class DecoderT, class ModuleGeneratorT>
static bool DecodeCodeSection(const ModuleEnvironment& env, DecoderT& d,
- ModuleGenerator& mg) {
+ ModuleGeneratorT& mg) {
if (!env.codeSection) {
if (env.numFuncDefs() != 0) {
return d.fail("expected code section");
@@ -996,3 +996,46 @@ SharedModule wasm::CompileStreaming(
return mg.finishModule(*bytecode, streamEnd.tier2Listener);
}
+
+class DumpIonModuleGenerator {
+ private:
+ ModuleEnvironment& moduleEnv_;
+ uint32_t targetFuncIndex_;
+ IonDumpContents contents_;
+ GenericPrinter& out_;
+ UniqueChars* error_;
+
+ public:
+ DumpIonModuleGenerator(ModuleEnvironment& moduleEnv, uint32_t targetFuncIndex,
+ IonDumpContents contents, GenericPrinter& out,
+ UniqueChars* error)
+ : moduleEnv_(moduleEnv),
+ targetFuncIndex_(targetFuncIndex),
+ contents_(contents),
+ out_(out),
+ error_(error) {}
+
+ bool finishFuncDefs() { return true; }
+ bool compileFuncDef(uint32_t funcIndex, uint32_t lineOrBytecode,
+ const uint8_t* begin, const uint8_t* end) {
+ if (funcIndex != targetFuncIndex_) {
+ return true;
+ }
+
+ FuncCompileInput input(funcIndex, lineOrBytecode, begin, end,
+ Uint32Vector());
+ return IonDumpFunction(moduleEnv_, input, contents_, out_, error_);
+ }
+};
+
+bool wasm::DumpIonFunctionInModule(const ShareableBytes& bytecode,
+ uint32_t targetFuncIndex,
+ IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error) {
+ UniqueCharsVector warnings;
+ Decoder d(bytecode.bytes, 0, error, &warnings);
+ ModuleEnvironment moduleEnv(FeatureArgs::allEnabled());
+ DumpIonModuleGenerator mg(moduleEnv, targetFuncIndex, contents, out, error);
+ return moduleEnv.init() && DecodeModuleEnvironment(d, &moduleEnv) &&
+ DecodeCodeSection(moduleEnv, d, mg);
+}
diff --git a/js/src/wasm/WasmCompile.h b/js/src/wasm/WasmCompile.h
index 2b07881eea..f39dc09fb9 100644
--- a/js/src/wasm/WasmCompile.h
+++ b/js/src/wasm/WasmCompile.h
@@ -93,6 +93,19 @@ SharedModule CompileStreaming(const CompileArgs& args, const Bytes& envBytes,
const Atomic<bool>& cancelled, UniqueChars* error,
UniqueCharsVector* warnings);
+// What to print out from dumping a function from Ion.
+enum class IonDumpContents {
+ UnoptimizedMIR,
+ OptimizedMIR,
+ LIR,
+
+ Default = UnoptimizedMIR,
+};
+
+bool DumpIonFunctionInModule(const ShareableBytes& bytecode,
+ uint32_t targetFuncIndex, IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error);
+
} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmCompileArgs.h b/js/src/wasm/WasmCompileArgs.h
index 1bf9e60d13..af85026b93 100644
--- a/js/src/wasm/WasmCompileArgs.h
+++ b/js/src/wasm/WasmCompileArgs.h
@@ -103,6 +103,15 @@ struct FeatureArgs {
FeatureArgs(FeatureArgs&&) = default;
static FeatureArgs build(JSContext* cx, const FeatureOptions& options);
+ static FeatureArgs allEnabled() {
+ FeatureArgs args;
+#define WASM_FEATURE(NAME, LOWER_NAME, ...) args.LOWER_NAME = true;
+ JS_FOR_WASM_FEATURES(WASM_FEATURE)
+#undef WASM_FEATURE
+ args.sharedMemory = Shareable::True;
+ args.simd = true;
+ return args;
+ }
#define WASM_FEATURE(NAME, LOWER_NAME, ...) bool LOWER_NAME;
JS_FOR_WASM_FEATURES(WASM_FEATURE)
diff --git a/js/src/wasm/WasmFeatures.cpp b/js/src/wasm/WasmFeatures.cpp
index 05804353ae..24ab1c7d51 100644
--- a/js/src/wasm/WasmFeatures.cpp
+++ b/js/src/wasm/WasmFeatures.cpp
@@ -21,6 +21,7 @@
#include "jit/AtomicOperations.h"
#include "jit/JitContext.h"
#include "jit/JitOptions.h"
+#include "js/Prefs.h"
#include "util/StringBuffer.h"
#include "vm/JSContext.h"
#include "vm/Realm.h"
@@ -56,13 +57,13 @@ static inline bool WasmThreadsFlag(JSContext* cx) {
JS_FOR_WASM_FEATURES(WASM_FEATURE);
#undef WASM_FEATURE
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- FLAG_PRED, FLAG_FORCE_ON, ...) \
- static inline bool Wasm##NAME##Flag(JSContext* cx) { \
- if (!(COMPILE_PRED)) { \
- return false; \
- } \
- return ((FLAG_PRED) && cx->options().wasm##NAME()) || (FLAG_FORCE_ON); \
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, FLAG_PRED, \
+ FLAG_FORCE_ON, FLAG_FUZZ_ON, PREF) \
+ static inline bool Wasm##NAME##Flag(JSContext* cx) { \
+ if (!(COMPILE_PRED)) { \
+ return false; \
+ } \
+ return ((FLAG_PRED) && JS::Prefs::wasm_##PREF()) || (FLAG_FORCE_ON); \
}
JS_FOR_WASM_FEATURES(WASM_FEATURE);
#undef WASM_FEATURE
@@ -219,10 +220,9 @@ bool wasm::AnyCompilerAvailable(JSContext* cx) {
// compiler that can support the feature. Subsequent compiler selection must
// ensure that only compilers that actually support the feature are used.
-#define WASM_FEATURE(NAME, LOWER_NAME, STAGE, COMPILE_PRED, COMPILER_PRED, \
- ...) \
- bool wasm::NAME##Available(JSContext* cx) { \
- return Wasm##NAME##Flag(cx) && (COMPILER_PRED); \
+#define WASM_FEATURE(NAME, LOWER_NAME, COMPILE_PRED, COMPILER_PRED, ...) \
+ bool wasm::NAME##Available(JSContext* cx) { \
+ return Wasm##NAME##Flag(cx) && (COMPILER_PRED); \
}
JS_FOR_WASM_FEATURES(WASM_FEATURE)
#undef WASM_FEATURE
diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
index 171ac285be..90555720da 100644
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -1882,3 +1882,16 @@ const char* ProfilingFrameIterator::label() const {
MOZ_CRASH("bad code range kind");
}
+
+ProfilingFrameIterator::Category ProfilingFrameIterator::category() const {
+ if (!exitReason_.isFixed() || !exitReason_.isNone() ||
+ !codeRange_->isFunction()) {
+ return Category::Other;
+ }
+
+ Tier tier;
+ if (!code_->lookupFunctionTier(codeRange_, &tier)) {
+ return Category::Other;
+ }
+ return tier == Tier::Optimized ? Category::Ion : Category::Baseline;
+}
diff --git a/js/src/wasm/WasmFrameIter.h b/js/src/wasm/WasmFrameIter.h
index 014f5de0ef..59590b1b2a 100644
--- a/js/src/wasm/WasmFrameIter.h
+++ b/js/src/wasm/WasmFrameIter.h
@@ -196,6 +196,12 @@ class ProfilingFrameIterator {
ProfilingFrameIterator(const jit::JitActivation& activation,
const RegisterState& state);
+ enum Category {
+ Baseline,
+ Ion,
+ Other,
+ };
+
void operator++();
bool done() const {
@@ -213,6 +219,8 @@ class ProfilingFrameIterator {
}
const char* label() const;
+ Category category() const;
+
void* endStackAddress() const { return endStackAddress_; }
};
diff --git a/js/src/wasm/WasmGcObject-inl.h b/js/src/wasm/WasmGcObject-inl.h
index 17800f41f1..4714aafc06 100644
--- a/js/src/wasm/WasmGcObject-inl.h
+++ b/js/src/wasm/WasmGcObject-inl.h
@@ -342,8 +342,7 @@ MOZ_ALWAYS_INLINE WasmArrayObject* WasmArrayObject::createArray(
calcStorageBytesChecked(typeDefData->arrayElemSize, numElements);
if (!storageBytes.isValid() ||
storageBytes.value() > uint32_t(wasm::MaxArrayPayloadBytes)) {
- JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
- JSMSG_WASM_ARRAY_IMP_LIMIT);
+ wasm::ReportTrapError(cx, JSMSG_WASM_ARRAY_IMP_LIMIT);
return nullptr;
}
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
index a470626de4..338812e1d6 100644
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -440,7 +440,7 @@ static bool InRange(uint32_t caller, uint32_t callee) {
using OffsetMap =
HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>;
using TrapMaybeOffsetArray =
- EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>>;
+ EnumeratedArray<Trap, Maybe<uint32_t>, size_t(Trap::Limit)>;
bool ModuleGenerator::linkCallSites() {
AutoCreatedBy acb(masm_, "linkCallSites");
diff --git a/js/src/wasm/WasmInitExpr.cpp b/js/src/wasm/WasmInitExpr.cpp
index e8c49cbc31..581eca2f62 100644
--- a/js/src/wasm/WasmInitExpr.cpp
+++ b/js/src/wasm/WasmInitExpr.cpp
@@ -74,20 +74,16 @@ class MOZ_STACK_CLASS InitExprInterpreter {
return stack.append(Val(RefType::func(), ref));
}
-#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
int32_t popI32() {
uint32_t result = stack.back().i32();
stack.popBack();
return int32_t(result);
}
-#endif
-#ifdef ENABLE_WASM_EXTENDED_CONST
int64_t popI64() {
uint64_t result = stack.back().i64();
stack.popBack();
return int64_t(result);
}
-#endif
bool evalGlobalGet(JSContext* cx, uint32_t index) {
RootedVal val(cx);
@@ -107,7 +103,6 @@ class MOZ_STACK_CLASS InitExprInterpreter {
return pushFuncRef(func);
}
bool evalRefNull(RefType type) { return pushRef(type, AnyRef::null()); }
-#ifdef ENABLE_WASM_EXTENDED_CONST
bool evalI32Add() {
uint32_t b = popI32();
uint32_t a = popI32();
@@ -138,7 +133,6 @@ class MOZ_STACK_CLASS InitExprInterpreter {
uint64_t a = popI64();
return pushI64(a * b);
}
-#endif // ENABLE_WASM_EXTENDED_CONST
#ifdef ENABLE_WASM_GC
bool evalStructNew(JSContext* cx, uint32_t typeIndex) {
const TypeDef& typeDef = instance().metadata().types->type(typeIndex);
@@ -320,7 +314,6 @@ bool InitExprInterpreter::evaluate(JSContext* cx, Decoder& d) {
}
CHECK(evalRefNull(type));
}
-#ifdef ENABLE_WASM_EXTENDED_CONST
case uint16_t(Op::I32Add): {
if (!d.readBinary()) {
return false;
@@ -357,7 +350,6 @@ bool InitExprInterpreter::evaluate(JSContext* cx, Decoder& d) {
}
CHECK(evalI64Mul());
}
-#endif
#ifdef ENABLE_WASM_GC
case uint16_t(Op::GcPrefix): {
switch (op.b1) {
@@ -449,9 +441,7 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
return false;
}
-#if defined(ENABLE_WASM_EXTENDED_CONST) || defined(ENABLE_WASM_GC)
Nothing nothing;
-#endif
NothingVector nothings{};
ResultType unusedType;
@@ -542,13 +532,9 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
*literal = Some(LitVal(ValType(type)));
break;
}
-#ifdef ENABLE_WASM_EXTENDED_CONST
case uint16_t(Op::I32Add):
case uint16_t(Op::I32Sub):
case uint16_t(Op::I32Mul): {
- if (!env->extendedConstEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
if (!iter.readBinary(ValType::I32, &nothing, &nothing)) {
return false;
}
@@ -558,16 +544,12 @@ bool wasm::DecodeConstantExpression(Decoder& d, ModuleEnvironment* env,
case uint16_t(Op::I64Add):
case uint16_t(Op::I64Sub):
case uint16_t(Op::I64Mul): {
- if (!env->extendedConstEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
if (!iter.readBinary(ValType::I64, &nothing, &nothing)) {
return false;
}
*literal = Nothing();
break;
}
-#endif
#ifdef ENABLE_WASM_GC
case uint16_t(Op::GcPrefix): {
if (!env->gcEnabled()) {
@@ -663,6 +645,7 @@ bool InitExpr::decodeAndValidate(Decoder& d, ModuleEnvironment* env,
expr->type_ = expected;
if (literal) {
+ literal->unsafeSetType(expected);
expr->kind_ = InitExprKind::Literal;
expr->literal_ = *literal;
return true;
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
index bf25b58c14..d025c02c16 100644
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -1531,8 +1531,10 @@ static bool ArrayCopyFromData(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
// Because `numBytesToCopy` is an in-range `CheckedUint32`, the cast to
// `size_t` is safe even on a 32-bit target.
- memcpy(arrayObj->data_, &seg->bytes[segByteOffset],
- size_t(numBytesToCopy.value()));
+ if (numElements != 0) {
+ memcpy(arrayObj->data_, &seg->bytes[segByteOffset],
+ size_t(numBytesToCopy.value()));
+ }
return true;
}
@@ -1948,35 +1950,42 @@ static bool ArrayCopyFromElem(JSContext* cx, Handle<WasmArrayObject*> arrayObj,
// take into account the enclosing recursion group of the type. This is
// temporary until builtin module functions can specify a precise array type
// for params/results.
-static WasmArrayObject* CastToI16Array(HandleAnyRef ref, bool needMutable) {
- if (!ref.isJSObject()) {
- return nullptr;
- }
+template <bool isMutable>
+static WasmArrayObject* UncheckedCastToArrayI16(HandleAnyRef ref) {
JSObject& object = ref.toJSObject();
- if (!object.is<WasmArrayObject>()) {
- return nullptr;
- }
WasmArrayObject& array = object.as<WasmArrayObject>();
- const ArrayType& type = array.typeDef().arrayType();
- if (type.elementType_ != StorageType::I16) {
- return nullptr;
+ DebugOnly<const ArrayType*> type(&array.typeDef().arrayType());
+ MOZ_ASSERT(type->elementType_ == StorageType::I16);
+ MOZ_ASSERT(type->isMutable_ == isMutable);
+ return &array;
+}
+
+/* static */
+int32_t Instance::stringTest(Instance* instance, void* stringArg) {
+ AnyRef string = AnyRef::fromCompiledCode(stringArg);
+ if (string.isNull() || !string.isJSString()) {
+ return 0;
}
- if (needMutable && !type.isMutable_) {
+ return 1;
+}
+
+/* static */
+void* Instance::stringCast(Instance* instance, void* stringArg) {
+ AnyRef string = AnyRef::fromCompiledCode(stringArg);
+ if (string.isNull() || !string.isJSString()) {
+ ReportTrapError(instance->cx(), JSMSG_WASM_BAD_CAST);
return nullptr;
}
- return &array;
+ return string.forCompiledCode();
}
/* static */
-void* Instance::stringFromWTF16Array(Instance* instance, void* arrayArg,
- uint32_t arrayStart, uint32_t arrayCount) {
+void* Instance::stringFromCharCodeArray(Instance* instance, void* arrayArg,
+ uint32_t arrayStart,
+ uint32_t arrayCount) {
JSContext* cx = instance->cx();
RootedAnyRef arrayRef(cx, AnyRef::fromCompiledCode(arrayArg));
- Rooted<WasmArrayObject*> array(cx);
- if (!(array = CastToI16Array(arrayRef, false))) {
- ReportTrapError(cx, JSMSG_WASM_BAD_CAST);
- return nullptr;
- }
+ Rooted<WasmArrayObject*> array(cx, UncheckedCastToArrayI16<true>(arrayRef));
CheckedUint32 lastIndexPlus1 =
CheckedUint32(arrayStart) + CheckedUint32(arrayCount);
@@ -1997,8 +2006,8 @@ void* Instance::stringFromWTF16Array(Instance* instance, void* arrayArg,
}
/* static */
-int32_t Instance::stringToWTF16Array(Instance* instance, void* stringArg,
- void* arrayArg, uint32_t arrayStart) {
+int32_t Instance::stringIntoCharCodeArray(Instance* instance, void* stringArg,
+ void* arrayArg, uint32_t arrayStart) {
JSContext* cx = instance->cx();
AnyRef stringRef = AnyRef::fromCompiledCode(stringArg);
if (!stringRef.isJSString()) {
@@ -2009,11 +2018,7 @@ int32_t Instance::stringToWTF16Array(Instance* instance, void* stringArg,
size_t stringLength = string->length();
RootedAnyRef arrayRef(cx, AnyRef::fromCompiledCode(arrayArg));
- Rooted<WasmArrayObject*> array(cx);
- if (!(array = CastToI16Array(arrayRef, true))) {
- ReportTrapError(cx, JSMSG_WASM_BAD_CAST);
- return -1;
- }
+ Rooted<WasmArrayObject*> array(cx, UncheckedCastToArrayI16<true>(arrayRef));
CheckedUint32 lastIndexPlus1 = CheckedUint32(arrayStart) + stringLength;
if (!lastIndexPlus1.isValid() ||
@@ -2120,8 +2125,8 @@ int32_t Instance::stringLength(Instance* instance, void* stringArg) {
return (int32_t)stringRef.toJSString()->length();
}
-void* Instance::stringConcatenate(Instance* instance, void* firstStringArg,
- void* secondStringArg) {
+void* Instance::stringConcat(Instance* instance, void* firstStringArg,
+ void* secondStringArg) {
JSContext* cx = instance->cx();
AnyRef firstStringRef = AnyRef::fromCompiledCode(firstStringArg);
@@ -2444,11 +2449,10 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
if (global.isIndirect()) {
// Initialize the cell
- wasm::GCPtrVal& cell = globalObjs[i]->val();
- cell = val.get();
+ globalObjs[i]->setVal(val);
+
// Link to the cell
- void* address = (void*)&cell.get().cell();
- *(void**)globalAddr = address;
+ *(void**)globalAddr = globalObjs[i]->addressOfCell();
} else {
val.get().writeToHeapLocation(globalAddr);
}
@@ -2539,6 +2543,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
size_t numWords = std::max<size_t>((numFuncs + 31) / 32, 1);
debugFilter_ = (uint32_t*)js_calloc(numWords, sizeof(uint32_t));
if (!debugFilter_) {
+ ReportOutOfMemory(cx);
return false;
}
}
@@ -2552,6 +2557,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
// Take references to the passive data segments
if (!passiveDataSegments_.resize(dataSegments.length())) {
+ ReportOutOfMemory(cx);
return false;
}
for (size_t i = 0; i < dataSegments.length(); i++) {
@@ -2563,6 +2569,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
// Create InstanceElemSegments for any passive element segments, since these
// are the ones available at runtime.
if (!passiveElemSegments_.resize(elemSegments.length())) {
+ ReportOutOfMemory(cx);
return false;
}
for (size_t i = 0; i < elemSegments.length(); i++) {
@@ -2571,6 +2578,7 @@ bool Instance::init(JSContext* cx, const JSObjectVector& funcImports,
passiveElemSegments_[i] = InstanceElemSegment();
InstanceElemSegment& instanceSeg = passiveElemSegments_[i];
if (!instanceSeg.reserve(seg.numElements())) {
+ ReportOutOfMemory(cx);
return false;
}
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
index dcc586b14f..074c6212df 100644
--- a/js/src/wasm/WasmInstance.h
+++ b/js/src/wasm/WasmInstance.h
@@ -571,10 +571,13 @@ class alignas(16) Instance {
static int32_t intrI8VecMul(Instance* instance, uint32_t dest, uint32_t src1,
uint32_t src2, uint32_t len, uint8_t* memBase);
- static void* stringFromWTF16Array(Instance* instance, void* arrayArg,
- uint32_t arrayStart, uint32_t arrayCount);
- static int32_t stringToWTF16Array(Instance* instance, void* stringArg,
- void* arrayArg, uint32_t start);
+ static int32_t stringTest(Instance* instance, void* stringArg);
+ static void* stringCast(Instance* instance, void* stringArg);
+ static void* stringFromCharCodeArray(Instance* instance, void* arrayArg,
+ uint32_t arrayStart,
+ uint32_t arrayCount);
+ static int32_t stringIntoCharCodeArray(Instance* instance, void* stringArg,
+ void* arrayArg, uint32_t arrayStart);
static void* stringFromCharCode(Instance* instance, uint32_t charCode);
static void* stringFromCodePoint(Instance* instance, uint32_t codePoint);
static int32_t stringCharCodeAt(Instance* instance, void* stringArg,
@@ -582,8 +585,8 @@ class alignas(16) Instance {
static int32_t stringCodePointAt(Instance* instance, void* stringArg,
uint32_t index);
static int32_t stringLength(Instance* instance, void* stringArg);
- static void* stringConcatenate(Instance* instance, void* firstStringArg,
- void* secondStringArg);
+ static void* stringConcat(Instance* instance, void* firstStringArg,
+ void* secondStringArg);
static void* stringSubstring(Instance* instance, void* stringArg,
int32_t startIndex, int32_t endIndex);
static int32_t stringEquals(Instance* instance, void* firstStringArg,
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
index 6fbfeb3809..0568a95804 100644
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -900,7 +900,7 @@ class FunctionCompiler {
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool brOnNull(uint32_t relativeDepth, const DefVector& values,
const ResultType& type, MDefinition* condition) {
if (inDeadCode()) {
@@ -963,7 +963,7 @@ class FunctionCompiler {
return true;
}
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_GC
MDefinition* refI31(MDefinition* input) {
@@ -2006,10 +2006,10 @@ class FunctionCompiler {
MOZ_CRASH("Unknown ABIArg kind.");
}
- template <typename SpanT>
- [[nodiscard]] bool passArgs(const DefVector& argDefs, SpanT types,
+ template <typename VecT>
+ [[nodiscard]] bool passArgs(const DefVector& argDefs, const VecT& types,
CallCompileState* call) {
- MOZ_ASSERT(argDefs.length() == types.size());
+ MOZ_ASSERT(argDefs.length() == types.length());
for (uint32_t i = 0; i < argDefs.length(); i++) {
MDefinition* def = argDefs[i];
ValType type = types[i];
@@ -2447,7 +2447,7 @@ class FunctionCompiler {
return collectUnaryCallResult(builtin.retType, def);
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool callRef(const FuncType& funcType, MDefinition* ref,
uint32_t lineOrBytecode,
const CallCompileState& call, DefVector* results) {
@@ -2489,7 +2489,7 @@ class FunctionCompiler {
# endif // ENABLE_WASM_TAIL_CALLS
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
/*********************************************** Control flow generation */
@@ -2788,7 +2788,8 @@ class FunctionCompiler {
// patches around.
for (uint32_t depth = 0; depth < iter().controlStackDepth(); depth++) {
LabelKind kind = iter().controlKind(depth);
- if (kind != LabelKind::Try && kind != LabelKind::Body) {
+ if (kind != LabelKind::Try && kind != LabelKind::TryTable &&
+ kind != LabelKind::Body) {
continue;
}
Control& control = iter().controlItem(depth);
@@ -5440,7 +5441,7 @@ static bool EmitReturnCallIndirect(FunctionCompiler& f) {
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
static bool EmitReturnCallRef(FunctionCompiler& f) {
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
@@ -7090,7 +7091,11 @@ static bool EmitLoadSplatSimd128(FunctionCompiler& f, Scalar::Type viewType,
return false;
}
- f.iter().setResult(f.loadSplatSimd128(viewType, addr, splatOp));
+ auto* ins = f.loadSplatSimd128(viewType, addr, splatOp);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7100,7 +7105,11 @@ static bool EmitLoadExtendSimd128(FunctionCompiler& f, wasm::SimdOp op) {
return false;
}
- f.iter().setResult(f.loadExtendSimd128(addr, op));
+ auto* ins = f.loadExtendSimd128(addr, op);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7111,7 +7120,11 @@ static bool EmitLoadZeroSimd128(FunctionCompiler& f, Scalar::Type viewType,
return false;
}
- f.iter().setResult(f.loadZeroSimd128(viewType, numBytes, addr));
+ auto* ins = f.loadZeroSimd128(viewType, numBytes, addr);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7123,7 +7136,11 @@ static bool EmitLoadLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
return false;
}
- f.iter().setResult(f.loadLaneSimd128(laneSize, addr, laneIndex, src));
+ auto* ins = f.loadLaneSimd128(laneSize, addr, laneIndex, src);
+ if (!f.inDeadCode() && !ins) {
+ return false;
+ }
+ f.iter().setResult(ins);
return true;
}
@@ -7141,7 +7158,7 @@ static bool EmitStoreLaneSimd128(FunctionCompiler& f, uint32_t laneSize) {
#endif // ENABLE_WASM_SIMD
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
static bool EmitRefAsNonNull(FunctionCompiler& f) {
MDefinition* ref;
if (!f.iter().readRefAsNonNull(&ref)) {
@@ -7204,7 +7221,7 @@ static bool EmitCallRef(FunctionCompiler& f) {
return true;
}
-#endif // ENABLE_WASM_FUNCTION_REFERENCES
+#endif // ENABLE_WASM_GC
#ifdef ENABLE_WASM_GC
@@ -7917,18 +7934,18 @@ static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
}
uint32_t bytecodeOffset = f.readBytecodeOffset();
- const SymbolicAddressSignature& callee = builtinModuleFunc->signature;
+ const SymbolicAddressSignature& callee = *builtinModuleFunc->sig();
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;
}
- if (!f.passArgs(params, builtinModuleFunc->params, &args)) {
+ if (!f.passArgs(params, builtinModuleFunc->funcType()->args(), &args)) {
return false;
}
- if (builtinModuleFunc->usesMemory) {
+ if (builtinModuleFunc->usesMemory()) {
MDefinition* memoryBase = f.memoryBase(0);
if (!f.passArg(memoryBase, MIRType::Pointer, &args)) {
return false;
@@ -7939,7 +7956,7 @@ static bool EmitCallBuiltinModuleFunc(FunctionCompiler& f) {
return false;
}
- bool hasResult = builtinModuleFunc->result.isSome();
+ bool hasResult = !builtinModuleFunc->funcType()->results().empty();
MDefinition* result = nullptr;
MDefinition** resultOutParam = hasResult ? &result : nullptr;
if (!f.builtinInstanceMethodCall(callee, bytecodeOffset, args,
@@ -7996,37 +8013,19 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
case uint16_t(Op::Else):
CHECK(EmitElse(f));
case uint16_t(Op::Try):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitTry(f));
case uint16_t(Op::Catch):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitCatch(f));
case uint16_t(Op::CatchAll):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitCatchAll(f));
case uint16_t(Op::Delegate):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
if (!EmitDelegate(f)) {
return false;
}
break;
case uint16_t(Op::Throw):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitThrow(f));
case uint16_t(Op::Rethrow):
- if (!f.moduleEnv().exceptionsEnabled()) {
- return f.iter().unrecognizedOpcode(&op);
- }
CHECK(EmitRethrow(f));
case uint16_t(Op::ThrowRef):
if (!f.moduleEnv().exnrefEnabled()) {
@@ -8474,36 +8473,35 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull):
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitRefAsNonNull(f));
case uint16_t(Op::BrOnNull): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitBrOnNull(f));
}
case uint16_t(Op::BrOnNonNull): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitBrOnNonNull(f));
}
case uint16_t(Op::CallRef): {
- if (!f.moduleEnv().functionReferencesEnabled()) {
+ if (!f.moduleEnv().gcEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitCallRef(f));
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
case uint16_t(Op::ReturnCallRef): {
- if (!f.moduleEnv().functionReferencesEnabled() ||
- !f.moduleEnv().tailCallsEnabled()) {
+ if (!f.moduleEnv().gcEnabled() || !f.moduleEnv().tailCallsEnabled()) {
return f.iter().unrecognizedOpcode(&op);
}
CHECK(EmitReturnCallRef(f));
@@ -9025,114 +9023,91 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
CHECK(EmitAtomicStore(f, ValType::I64, Scalar::Uint32));
case uint32_t(ThreadOp::I32AtomicAdd):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicAdd16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchAddOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Add));
case uint32_t(ThreadOp::I64AtomicAdd32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchAddOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Add));
case uint32_t(ThreadOp::I32AtomicSub):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicSub16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchSubOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Sub));
case uint32_t(ThreadOp::I64AtomicSub32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchSubOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Sub));
case uint32_t(ThreadOp::I32AtomicAnd):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicAnd16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchAndOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::And));
case uint32_t(ThreadOp::I64AtomicAnd32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchAndOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::And));
case uint32_t(ThreadOp::I32AtomicOr):
- CHECK(
- EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr):
- CHECK(
- EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr8U):
- CHECK(
- EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicOr16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr8U):
- CHECK(
- EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Or));
case uint32_t(ThreadOp::I64AtomicOr32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchOrOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Or));
case uint32_t(ThreadOp::I32AtomicXor):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Int32, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Int64, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor8U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXor16U):
- CHECK(EmitAtomicRMW(f, ValType::I32, Scalar::Uint16,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I32, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor8U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8,
- AtomicFetchXorOp));
+ CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint8, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor16U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint16,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint16, AtomicOp::Xor));
case uint32_t(ThreadOp::I64AtomicXor32U):
- CHECK(EmitAtomicRMW(f, ValType::I64, Scalar::Uint32,
- AtomicFetchXorOp));
+ CHECK(
+ EmitAtomicRMW(f, ValType::I64, Scalar::Uint32, AtomicOp::Xor));
case uint32_t(ThreadOp::I32AtomicXchg):
CHECK(EmitAtomicXchg(f, ValType::I32, Scalar::Int32));
@@ -9267,6 +9242,41 @@ static bool EmitBodyExprs(FunctionCompiler& f) {
#undef CHECK
}
+static bool IonBuildMIR(Decoder& d, const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ const ValTypeVector& locals, MIRGenerator& mir,
+ TryNoteVector& tryNotes, FeatureUsage* observedFeatures,
+ UniqueChars* error) {
+ // Initialize MIR global information used for optimization
+ if (moduleEnv.numMemories() > 0) {
+ if (moduleEnv.memories[0].indexType() == IndexType::I32) {
+ mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
+ } else {
+ mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
+ }
+ }
+
+ // Build MIR graph
+ FunctionCompiler f(moduleEnv, d, func, locals, mir, tryNotes);
+ if (!f.init()) {
+ return false;
+ }
+
+ if (!f.startBlock()) {
+ return false;
+ }
+
+ if (!EmitBodyExprs(f)) {
+ return false;
+ }
+
+ f.finish();
+
+ *observedFeatures = f.featureUsage();
+
+ return true;
+}
+
bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
const CompilerEnvironment& compilerEnv,
LifoAlloc& lifo,
@@ -9307,52 +9317,28 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
Decoder d(func.begin, func.end, func.lineOrBytecode, error);
// Build the local types vector.
-
- const FuncType& funcType = *moduleEnv.funcs[func.index].type;
ValTypeVector locals;
- if (!locals.appendAll(funcType.args())) {
- return false;
- }
- if (!DecodeLocalEntries(d, *moduleEnv.types, moduleEnv.features, &locals)) {
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
return false;
}
// Set up for Ion compilation.
-
const JitCompileOptions options;
MIRGraph graph(&alloc);
CompileInfo compileInfo(locals.length());
MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
IonOptimizations.get(OptimizationLevel::Wasm));
- if (moduleEnv.numMemories() > 0) {
- if (moduleEnv.memories[0].indexType() == IndexType::I32) {
- mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength32());
- } else {
- mir.initMinWasmMemory0Length(moduleEnv.memories[0].initialLength64());
- }
- }
// Build MIR graph
- {
- FunctionCompiler f(moduleEnv, d, func, locals, mir, masm.tryNotes());
- if (!f.init()) {
- return false;
- }
-
- if (!f.startBlock()) {
- return false;
- }
-
- if (!EmitBodyExprs(f)) {
- return false;
- }
-
- f.finish();
-
- // Record observed feature usage
- code->featureUsage |= f.featureUsage();
+ FeatureUsage observedFeatures;
+ if (!IonBuildMIR(d, moduleEnv, func, locals, mir, masm.tryNotes(),
+ &observedFeatures, error)) {
+ return false;
}
+ // Record observed feature usage
+ code->featureUsage |= observedFeatures;
+
// Compile MIR graph
{
jit::SpewBeginWasmFunction(&mir, func.index);
@@ -9373,7 +9359,7 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
BytecodeOffset prologueTrapOffset(func.lineOrBytecode);
FuncOffsets offsets;
- ArgTypeVector args(funcType);
+ ArgTypeVector args(*moduleEnv.funcs[func.index].type);
if (!codegen.generateWasm(CallIndirectId::forFunc(moduleEnv, func.index),
prologueTrapOffset, args, trapExitLayout,
trapExitLayoutNumWords, &offsets,
@@ -9407,6 +9393,66 @@ bool wasm::IonCompileFunctions(const ModuleEnvironment& moduleEnv,
return code->swap(masm);
}
+bool wasm::IonDumpFunction(const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ IonDumpContents contents, GenericPrinter& out,
+ UniqueChars* error) {
+ LifoAlloc lifo(TempAllocator::PreferredLifoChunkSize);
+ TempAllocator alloc(&lifo);
+ JitContext jitContext;
+ Decoder d(func.begin, func.end, func.lineOrBytecode, error);
+
+ // Decode the locals.
+ ValTypeVector locals;
+ if (!DecodeLocalEntriesWithParams(d, moduleEnv, func.index, &locals)) {
+ return false;
+ }
+
+ // Set up for Ion compilation.
+ const JitCompileOptions options;
+ MIRGraph graph(&alloc);
+ CompileInfo compileInfo(locals.length());
+ MIRGenerator mir(nullptr, options, &alloc, &graph, &compileInfo,
+ IonOptimizations.get(OptimizationLevel::Wasm));
+
+ // Build MIR graph
+ TryNoteVector tryNotes;
+ FeatureUsage observedFeatures;
+ if (!IonBuildMIR(d, moduleEnv, func, locals, mir, tryNotes, &observedFeatures,
+ error)) {
+ return false;
+ }
+
+ if (contents == IonDumpContents::UnoptimizedMIR) {
+ graph.dump(out);
+ return true;
+ }
+
+ // Optimize the MIR graph
+ if (!OptimizeMIR(&mir)) {
+ return false;
+ }
+
+ if (contents == IonDumpContents::OptimizedMIR) {
+ graph.dump(out);
+ return true;
+ }
+
+#ifdef JS_JITSPEW
+ // Generate the LIR graph
+ LIRGraph* lir = GenerateLIR(&mir);
+ if (!lir) {
+ return false;
+ }
+
+ MOZ_ASSERT(contents == IonDumpContents::LIR);
+ lir->dump(out);
+#else
+ out.printf("cannot dump LIR without --enable-jitspew");
+#endif
+ return true;
+}
+
bool js::wasm::IonPlatformSupport() {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS64) || \
diff --git a/js/src/wasm/WasmIonCompile.h b/js/src/wasm/WasmIonCompile.h
index f583cbad1f..4597c08be1 100644
--- a/js/src/wasm/WasmIonCompile.h
+++ b/js/src/wasm/WasmIonCompile.h
@@ -22,6 +22,9 @@
#include "wasm/WasmGenerator.h"
namespace js {
+
+class GenericPrinter;
+
namespace wasm {
// Return whether IonCompileFunction() can generate code on the current device.
@@ -35,6 +38,11 @@ namespace wasm {
const FuncCompileInputVector& inputs,
CompiledCode* code, UniqueChars* error);
+[[nodiscard]] bool IonDumpFunction(const ModuleEnvironment& moduleEnv,
+ const FuncCompileInput& func,
+ IonDumpContents contents,
+ GenericPrinter& out, UniqueChars* error);
+
} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
index 6cc9528415..2eb5e355d9 100644
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -768,8 +768,14 @@ static JSObject* GetWasmConstructorPrototype(JSContext* cx,
}
#ifdef ENABLE_WASM_TYPE_REFLECTIONS
-static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
- return NewStringCopyUTF8Z(cx, JS::ConstUTF8CharsZ(chars, strlen(chars)));
+template <typename T>
+static JSString* TypeToString(JSContext* cx, T type) {
+ UniqueChars chars = ToString(type, nullptr);
+ if (!chars) {
+ return nullptr;
+ }
+ return NewStringCopyUTF8Z(
+ cx, JS::ConstUTF8CharsZ(chars.get(), strlen(chars.get())));
}
[[nodiscard]] static JSObject* ValTypesToArray(JSContext* cx,
@@ -779,8 +785,7 @@ static JSString* UTF8CharsToString(JSContext* cx, const char* chars) {
return nullptr;
}
for (ValType valType : valTypes) {
- RootedString type(cx,
- UTF8CharsToString(cx, ToString(valType, nullptr).get()));
+ RootedString type(cx, TypeToString(cx, valType));
if (!type) {
return nullptr;
}
@@ -809,15 +814,14 @@ static JSObject* FuncTypeToObject(JSContext* cx, const FuncType& type) {
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* TableTypeToObject(JSContext* cx, RefType type,
uint32_t initial, Maybe<uint32_t> maximum) {
Rooted<IdValueVector> props(cx, IdValueVector(cx));
- RootedString elementType(
- cx, UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ RootedString elementType(cx, TypeToString(cx, type));
if (!elementType || !props.append(IdValuePair(NameToId(cx->names().element),
StringValue(elementType)))) {
ReportOutOfMemory(cx);
@@ -838,7 +842,7 @@ static JSObject* TableTypeToObject(JSContext* cx, RefType type,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* MemoryTypeToObject(JSContext* cx, bool shared,
@@ -892,7 +896,7 @@ static JSObject* MemoryTypeToObject(JSContext* cx, bool shared,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* GlobalTypeToObject(JSContext* cx, ValType type,
@@ -905,15 +909,14 @@ static JSObject* GlobalTypeToObject(JSContext* cx, ValType type,
return nullptr;
}
- RootedString valueType(cx,
- UTF8CharsToString(cx, ToString(type, nullptr).get()));
+ RootedString valueType(cx, TypeToString(cx, type));
if (!valueType || !props.append(IdValuePair(NameToId(cx->names().value),
StringValue(valueType)))) {
ReportOutOfMemory(cx);
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
static JSObject* TagTypeToObject(JSContext* cx,
@@ -928,7 +931,7 @@ static JSObject* TagTypeToObject(JSContext* cx,
return nullptr;
}
- return NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ return NewPlainObjectWithUniqueNames(cx, props);
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
@@ -1184,8 +1187,7 @@ bool WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp) {
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
- JSObject* obj =
- NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ JSObject* obj = NewPlainObjectWithUniqueNames(cx, props);
if (!obj) {
return false;
}
@@ -1288,8 +1290,7 @@ bool WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp) {
}
#endif // ENABLE_WASM_TYPE_REFLECTIONS
- JSObject* obj =
- NewPlainObjectWithUniqueNames(cx, props.begin(), props.length());
+ JSObject* obj = NewPlainObjectWithUniqueNames(cx, props);
if (!obj) {
return false;
}
@@ -3227,7 +3228,7 @@ void WasmGlobalObject::finalize(JS::GCContext* gcx, JSObject* obj) {
// Release the strong reference to the type definitions this global could
// be referencing.
global->type().Release();
- gcx->delete_(obj, &global->val(), MemoryUse::WasmGlobalCell);
+ gcx->delete_(obj, &global->mutableVal(), MemoryUse::WasmGlobalCell);
}
}
@@ -3253,7 +3254,9 @@ WasmGlobalObject* WasmGlobalObject::create(JSContext* cx, HandleVal value,
// It's simpler to initialize the cell after the object has been created,
// to avoid needing to root the cell before the object creation.
- obj->val() = value.get();
+ // We don't use `setVal` here because it assumes the cell has already
+ // been initialized.
+ obj->mutableVal() = value.get();
// Acquire a strong reference to a type definition this global could
// be referencing.
obj->type().AddRef();
@@ -3384,7 +3387,7 @@ bool WasmGlobalObject::valueSetterImpl(JSContext* cx, const CallArgs& args) {
if (!Val::fromJSValue(cx, global->type(), args.get(0), &val)) {
return false;
}
- global->val() = val.get();
+ global->setVal(val);
args.rval().setUndefined();
return true;
@@ -3417,10 +3420,23 @@ bool WasmGlobalObject::isMutable() const {
ValType WasmGlobalObject::type() const { return val().get().type(); }
-GCPtrVal& WasmGlobalObject::val() const {
+GCPtrVal& WasmGlobalObject::mutableVal() {
+ return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
+}
+
+const GCPtrVal& WasmGlobalObject::val() const {
return *reinterpret_cast<GCPtrVal*>(getReservedSlot(VAL_SLOT).toPrivate());
}
+void WasmGlobalObject::setVal(wasm::HandleVal value) {
+ MOZ_ASSERT(type() == value.get().type());
+ mutableVal() = value;
+}
+
+void* WasmGlobalObject::addressOfCell() const {
+ return (void*)&val().get().cell();
+}
+
#ifdef ENABLE_WASM_TYPE_REFLECTIONS
/* static */
bool WasmGlobalObject::typeImpl(JSContext* cx, const CallArgs& args) {
@@ -4652,6 +4668,10 @@ static bool WebAssembly_validate(JSContext* cx, unsigned argc, Value* vp) {
}
FeatureOptions options;
+ if (!options.init(cx, callArgs.get(1))) {
+ return false;
+ }
+
UniqueChars error;
bool validated = Validate(cx, *bytecode, options, &error);
@@ -5351,15 +5371,13 @@ static bool WebAssemblyClassFinish(JSContext* cx, HandleObject object,
}
}
- if (ExceptionsAvailable(cx)) {
- constexpr NameAndProtoKey exceptionEntries[] = {
- {"Tag", JSProto_WasmTag},
- {"Exception", JSProto_WasmException},
- };
- for (const auto& entry : exceptionEntries) {
- if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
- return false;
- }
+ constexpr NameAndProtoKey exceptionEntries[] = {
+ {"Tag", JSProto_WasmTag},
+ {"Exception", JSProto_WasmException},
+ };
+ for (const auto& entry : exceptionEntries) {
+ if (!WebAssemblyDefineConstructor(cx, wasm, entry, &ctorValue, &id)) {
+ return false;
}
}
diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
index 10c71b436b..27d49701a9 100644
--- a/js/src/wasm/WasmJS.h
+++ b/js/src/wasm/WasmJS.h
@@ -167,6 +167,8 @@ class WasmGlobalObject : public NativeObject {
static bool valueSetterImpl(JSContext* cx, const CallArgs& args);
static bool valueSetter(JSContext* cx, unsigned argc, Value* vp);
+ wasm::GCPtrVal& mutableVal();
+
public:
static const unsigned RESERVED_SLOTS = 2;
static const JSClass class_;
@@ -182,7 +184,9 @@ class WasmGlobalObject : public NativeObject {
bool isMutable() const;
wasm::ValType type() const;
- wasm::GCPtrVal& val() const;
+ const wasm::GCPtrVal& val() const;
+ void setVal(wasm::HandleVal value);
+ void* addressOfCell() const;
};
// The class of WebAssembly.Instance. Each WasmInstanceObject owns a
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
index c2de0429d3..a297e81ad3 100644
--- a/js/src/wasm/WasmModule.cpp
+++ b/js/src/wasm/WasmModule.cpp
@@ -867,7 +867,7 @@ static bool GetGlobalExport(JSContext* cx,
MOZ_RELEASE_ASSERT(!global.isImport());
RootedVal globalVal(cx);
instanceObj->instance().constantGlobalGet(globalIndex, &globalVal);
- globalObj->val() = globalVal;
+ globalObj->setVal(globalVal);
return true;
}
diff --git a/js/src/wasm/WasmOpIter.cpp b/js/src/wasm/WasmOpIter.cpp
index 102d39639c..d60a87dc12 100644
--- a/js/src/wasm/WasmOpIter.cpp
+++ b/js/src/wasm/WasmOpIter.cpp
@@ -25,14 +25,14 @@ using namespace js::jit;
using namespace js::wasm;
#ifdef ENABLE_WASM_GC
-# ifndef ENABLE_WASM_FUNCTION_REFERENCES
+# ifndef ENABLE_WASM_GC  // FIXME(review): always false inside the enclosing #ifdef ENABLE_WASM_GC — this guard is now dead; remove the whole check block
# error "GC types require the function-references feature"
# endif
#endif
#ifdef DEBUG
-# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# ifdef ENABLE_WASM_GC
# define WASM_FUNCTION_REFERENCES_OP(code) return code
# else
# define WASM_FUNCTION_REFERENCES_OP(code) break
diff --git a/js/src/wasm/WasmOpIter.h b/js/src/wasm/WasmOpIter.h
index 1711cc3926..59d494bfbf 100644
--- a/js/src/wasm/WasmOpIter.h
+++ b/js/src/wasm/WasmOpIter.h
@@ -165,7 +165,7 @@ enum class OpKind {
ReturnCall,
CallIndirect,
ReturnCallIndirect,
-# ifdef ENABLE_WASM_FUNCTION_REFERENCES
+# ifdef ENABLE_WASM_GC
CallRef,
ReturnCallRef,
# endif
@@ -493,7 +493,8 @@ class MOZ_STACK_CLASS OpIter : private Policy {
[[nodiscard]] bool getControl(uint32_t relativeDepth, Control** controlEntry);
[[nodiscard]] bool checkBranchValueAndPush(uint32_t relativeDepth,
ResultType* type,
- ValueVector* values);
+ ValueVector* values,
+ bool rewriteStackTypes);
[[nodiscard]] bool checkBrTableEntryAndPush(uint32_t* relativeDepth,
ResultType prevBranchType,
ResultType* branchType,
@@ -533,7 +534,7 @@ class MOZ_STACK_CLASS OpIter : private Policy {
inline bool checkIsSubtypeOf(ResultType params, ResultType results);
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
inline bool checkIsSubtypeOf(uint32_t actualTypeIndex,
uint32_t expectedTypeIndex);
#endif
@@ -703,7 +704,7 @@ class MOZ_STACK_CLASS OpIter : private Policy {
uint32_t* tableIndex, Value* callee,
ValueVector* argValues);
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
[[nodiscard]] bool readCallRef(const FuncType** funcType, Value* callee,
ValueVector* argValues);
@@ -932,7 +933,7 @@ inline bool OpIter<Policy>::checkIsSubtypeOf(ResultType params,
return true;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
template <typename Policy>
inline bool OpIter<Policy>::checkIsSubtypeOf(uint32_t actualTypeIndex,
uint32_t expectedTypeIndex) {
@@ -1480,14 +1481,15 @@ inline void OpIter<Policy>::popEnd() {
template <typename Policy>
inline bool OpIter<Policy>::checkBranchValueAndPush(uint32_t relativeDepth,
ResultType* type,
- ValueVector* values) {
+ ValueVector* values,
+ bool rewriteStackTypes) {
Control* block = nullptr;
if (!getControl(relativeDepth, &block)) {
return false;
}
*type = block->branchTargetType();
- return checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false);
+ return checkTopTypeMatches(*type, values, rewriteStackTypes);
}
template <typename Policy>
@@ -1499,7 +1501,8 @@ inline bool OpIter<Policy>::readBr(uint32_t* relativeDepth, ResultType* type,
return fail("unable to read br depth");
}
- if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ if (!checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/false)) {
return false;
}
@@ -1520,7 +1523,8 @@ inline bool OpIter<Policy>::readBrIf(uint32_t* relativeDepth, ResultType* type,
return false;
}
- return checkBranchValueAndPush(*relativeDepth, type, values);
+ return checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/true);
}
#define UNKNOWN_ARITY UINT32_MAX
@@ -2392,10 +2396,10 @@ inline bool OpIter<Policy>::readRefFunc(uint32_t* funcIndex) {
"function index is not declared in a section before the code section");
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
// When function references enabled, push type index on the stack, e.g. for
// validation of the call_ref instruction.
- if (env_.functionReferencesEnabled()) {
+ if (env_.gcEnabled()) {
const uint32_t typeIndex = env_.funcs[*funcIndex].typeIndex;
const TypeDef& typeDef = env_.types->type(typeIndex);
return push(RefType::fromTypeDef(&typeDef, false));
@@ -2457,7 +2461,8 @@ inline bool OpIter<Policy>::readBrOnNull(uint32_t* relativeDepth,
return false;
}
- if (!checkBranchValueAndPush(*relativeDepth, type, values)) {
+ if (!checkBranchValueAndPush(*relativeDepth, type, values,
+ /*rewriteStackTypes=*/true)) {
return false;
}
@@ -2505,7 +2510,7 @@ inline bool OpIter<Policy>::readBrOnNonNull(uint32_t* relativeDepth,
}
// Check if the type stack matches the branch target type.
- if (!checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/false)) {
+ if (!checkTopTypeMatches(*type, values, /*rewriteStackTypes=*/true)) {
return false;
}
@@ -2693,7 +2698,7 @@ inline bool OpIter<Policy>::readReturnCallIndirect(uint32_t* funcTypeIndex,
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
template <typename Policy>
inline bool OpIter<Policy>::readCallRef(const FuncType** funcType,
Value* callee, ValueVector* argValues) {
@@ -2719,7 +2724,7 @@ inline bool OpIter<Policy>::readCallRef(const FuncType** funcType,
}
#endif
-#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_FUNCTION_REFERENCES)
+#if defined(ENABLE_WASM_TAIL_CALLS) && defined(ENABLE_WASM_GC)
template <typename Policy>
inline bool OpIter<Policy>::readReturnCallRef(const FuncType** funcType,
Value* callee,
@@ -4001,7 +4006,7 @@ inline bool OpIter<Policy>::readBrOnCast(bool onSuccess,
fallthroughTypes[labelTypeNumValues - 1] = typeOnFallthrough;
return checkTopTypeMatches(ResultType::Vector(fallthroughTypes), values,
- /*rewriteStackTypes=*/false);
+ /*rewriteStackTypes=*/true);
}
template <typename Policy>
@@ -4228,18 +4233,18 @@ inline bool OpIter<Policy>::readCallBuiltinModuleFunc(
return fail("index out of range");
}
- *builtinModuleFunc = &BuiltinModuleFunc::getFromId(BuiltinModuleFuncId(id));
+ *builtinModuleFunc = &BuiltinModuleFuncs::getFromId(BuiltinModuleFuncId(id));
- if ((*builtinModuleFunc)->usesMemory && env_.numMemories() == 0) {
+ if ((*builtinModuleFunc)->usesMemory() && env_.numMemories() == 0) {
return fail("can't touch memory without memory");
}
- if (!popWithTypes((*builtinModuleFunc)->params, params)) {
+
+ const FuncType& funcType = *(*builtinModuleFunc)->funcType();
+ if (!popCallArgs(funcType.args(), params)) {
return false;
}
- if ((*builtinModuleFunc)->result.isNothing()) {
- return true;
- }
- return push(*(*builtinModuleFunc)->result);
+
+ return push(ResultType::Vector(funcType.results()));
}
} // namespace wasm
diff --git a/js/src/wasm/WasmProcess.cpp b/js/src/wasm/WasmProcess.cpp
index 427ba42d9d..0436e0a23f 100644
--- a/js/src/wasm/WasmProcess.cpp
+++ b/js/src/wasm/WasmProcess.cpp
@@ -26,10 +26,12 @@
#include "threading/ExclusiveData.h"
#include "vm/MutexIDs.h"
#include "vm/Runtime.h"
+#include "wasm/WasmBuiltinModule.h"
#include "wasm/WasmBuiltins.h"
#include "wasm/WasmCode.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmModuleTypes.h"
+#include "wasm/WasmStaticTypeDefs.h"
using namespace js;
using namespace wasm;
@@ -438,6 +440,15 @@ bool wasm::Init() {
oomUnsafe.crash("js::wasm::Init");
}
+ if (!StaticTypeDefs::init()) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
+ // BuiltinModuleFuncs::init depends on StaticTypeDefs::init having run above.
+ if (!BuiltinModuleFuncs::init()) {
+ oomUnsafe.crash("js::wasm::Init");
+ }
+
sProcessCodeSegmentMap = map;
if (!InitTagForJSValue()) {
@@ -455,6 +466,8 @@ void wasm::ShutDown() {
return;
}
+ BuiltinModuleFuncs::destroy();
+ StaticTypeDefs::destroy();
PurgeCanonicalTypes();
if (sWrappedJSValueTagType) {
diff --git a/js/src/wasm/WasmSerialize.cpp b/js/src/wasm/WasmSerialize.cpp
index 62a68c5aff..35f437688c 100644
--- a/js/src/wasm/WasmSerialize.cpp
+++ b/js/src/wasm/WasmSerialize.cpp
@@ -957,7 +957,7 @@ CoderResult CodeSymbolicLinkArray(
template <CoderMode mode>
CoderResult CodeLinkData(Coder<mode>& coder,
CoderArg<mode, wasm::LinkData> item) {
- WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::LinkData, 8832);
+ WASM_VERIFY_SERIALIZATION_FOR_SIZE(wasm::LinkData, 8976);
if constexpr (mode == MODE_ENCODE) {
MOZ_ASSERT(item->tier == Tier::Serialized);
}
diff --git a/js/src/wasm/WasmStaticTypeDefs.cpp b/js/src/wasm/WasmStaticTypeDefs.cpp
new file mode 100644
index 0000000000..2306339087
--- /dev/null
+++ b/js/src/wasm/WasmStaticTypeDefs.cpp
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2023 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmStaticTypeDefs.h"
+
+#include "wasm/WasmTypeDef.h"
+
+using namespace js;
+using namespace js::wasm;
+
+const TypeDef* StaticTypeDefs::arrayMutI16 = nullptr;
+
+bool StaticTypeDefs::init() {
+ RefPtr<TypeContext> types = js_new<TypeContext>();
+ if (!types) {
+ return false;
+ }
+
+#ifdef ENABLE_WASM_GC
+ arrayMutI16 = types->addType(ArrayType(StorageType::I16, true));
+ if (!arrayMutI16) {
+ return false;
+ }
+ arrayMutI16->recGroup().AddRef();
+#endif
+
+ return true;
+}
+
+void StaticTypeDefs::destroy() {
+ if (arrayMutI16) {
+ arrayMutI16->recGroup().Release();
+ arrayMutI16 = nullptr;
+ }
+}
diff --git a/js/src/wasm/WasmStaticTypeDefs.h b/js/src/wasm/WasmStaticTypeDefs.h
new file mode 100644
index 0000000000..d3a01ad26c
--- /dev/null
+++ b/js/src/wasm/WasmStaticTypeDefs.h
@@ -0,0 +1,41 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2023 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_static_type_defs
+#define wasm_static_type_defs
+
+namespace js {
+namespace wasm {
+
+class TypeDef;
+
+// Simple type definitions used in builtins with a static lifetime.
+//
+// TODO: this class is very simple and won't scale well with many type
+// definitions. Rethink this design if the number of type definitions grows.
+struct StaticTypeDefs {
+ static const TypeDef* arrayMutI16;
+
+ [[nodiscard]] static bool init();
+ static void destroy();
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_static_type_defs
diff --git a/js/src/wasm/WasmTypeDef.h b/js/src/wasm/WasmTypeDef.h
index 7aedbed1f8..3426647095 100644
--- a/js/src/wasm/WasmTypeDef.h
+++ b/js/src/wasm/WasmTypeDef.h
@@ -1228,13 +1228,16 @@ class TypeContext : public AtomicRefCounted<TypeContext> {
}
template <typename T>
- [[nodiscard]] bool addType(T&& type) {
+ [[nodiscard]] const TypeDef* addType(T&& type) {
MutableRecGroup recGroup = startRecGroup(1);
if (!recGroup) {
- return false;
+ return nullptr;
}
recGroup->type(0) = std::move(type);
- return endRecGroup();
+ if (!endRecGroup()) {
+ return nullptr;
+ }
+ return &this->type(length() - 1);
}
const TypeDef& type(uint32_t index) const { return *types_[index]; }
diff --git a/js/src/wasm/WasmValType.cpp b/js/src/wasm/WasmValType.cpp
index d1874b7131..64ef8ff85a 100644
--- a/js/src/wasm/WasmValType.cpp
+++ b/js/src/wasm/WasmValType.cpp
@@ -150,94 +150,7 @@ enum class RefTypeResult {
Unparsed,
};
-static RefTypeResult MaybeToRefType(JSContext* cx, HandleObject obj,
- RefType* out) {
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
- if (!wasm::FunctionReferencesAvailable(cx)) {
- return RefTypeResult::Unparsed;
- }
-
- JSAtom* refAtom = Atomize(cx, "ref", strlen("ref"));
- if (!refAtom) {
- return RefTypeResult::Failure;
- }
- RootedId refId(cx, AtomToId(refAtom));
-
- RootedValue refVal(cx);
- if (!GetProperty(cx, obj, obj, refId, &refVal)) {
- return RefTypeResult::Failure;
- }
-
- RootedString typeStr(cx, ToString(cx, refVal));
- if (!typeStr) {
- return RefTypeResult::Failure;
- }
-
- Rooted<JSLinearString*> typeLinearStr(cx, typeStr->ensureLinear(cx));
- if (!typeLinearStr) {
- return RefTypeResult::Failure;
- }
-
- if (StringEqualsLiteral(typeLinearStr, "func")) {
- *out = RefType::func();
- } else if (StringEqualsLiteral(typeLinearStr, "extern")) {
- *out = RefType::extern_();
-# ifdef ENABLE_WASM_EXNREF
- } else if (ExnRefAvailable(cx) && StringEqualsLiteral(typeLinearStr, "exn")) {
- *out = RefType::exn();
-# endif
-# ifdef ENABLE_WASM_GC
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "any")) {
- *out = RefType::any();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "eq")) {
- *out = RefType::eq();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "i31")) {
- *out = RefType::i31();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "struct")) {
- *out = RefType::struct_();
- } else if (GcAvailable(cx) && StringEqualsLiteral(typeLinearStr, "array")) {
- *out = RefType::array();
-# endif
- } else {
- return RefTypeResult::Unparsed;
- }
-
- JSAtom* nullableAtom = Atomize(cx, "nullable", strlen("nullable"));
- if (!nullableAtom) {
- return RefTypeResult::Failure;
- }
- RootedId nullableId(cx, AtomToId(nullableAtom));
- RootedValue nullableVal(cx);
- if (!GetProperty(cx, obj, obj, nullableId, &nullableVal)) {
- return RefTypeResult::Failure;
- }
-
- bool nullable = ToBoolean(nullableVal);
- if (!nullable) {
- *out = out->asNonNullable();
- }
- MOZ_ASSERT(out->isNullable() == nullable);
- return RefTypeResult::Parsed;
-#else
- return RefTypeResult::Unparsed;
-#endif
-}
-
bool wasm::ToValType(JSContext* cx, HandleValue v, ValType* out) {
- if (v.isObject()) {
- RootedObject obj(cx, &v.toObject());
- RefType refType;
- switch (MaybeToRefType(cx, obj, &refType)) {
- case RefTypeResult::Failure:
- return false;
- case RefTypeResult::Parsed:
- *out = ValType(refType);
- return true;
- case RefTypeResult::Unparsed:
- break;
- }
- }
-
RootedString typeStr(cx, ToString(cx, v));
if (!typeStr) {
return false;
@@ -274,18 +187,6 @@ bool wasm::ToValType(JSContext* cx, HandleValue v, ValType* out) {
}
bool wasm::ToRefType(JSContext* cx, HandleValue v, RefType* out) {
- if (v.isObject()) {
- RootedObject obj(cx, &v.toObject());
- switch (MaybeToRefType(cx, obj, out)) {
- case RefTypeResult::Failure:
- return false;
- case RefTypeResult::Parsed:
- return true;
- case RefTypeResult::Unparsed:
- break;
- }
- }
-
RootedString typeStr(cx, ToString(cx, v));
if (!typeStr) {
return false;
diff --git a/js/src/wasm/WasmValType.h b/js/src/wasm/WasmValType.h
index 0821ee5df9..c98eda28dd 100644
--- a/js/src/wasm/WasmValType.h
+++ b/js/src/wasm/WasmValType.h
@@ -479,7 +479,7 @@ class StorageTypeTraits {
case TypeCode::NullExternRef:
case TypeCode::NullAnyRef:
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case AbstractTypeRefCode:
#endif
return true;
@@ -557,7 +557,7 @@ class ValTypeTraits {
case TypeCode::NullExternRef:
case TypeCode::NullAnyRef:
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case AbstractTypeRefCode:
#endif
return true;
diff --git a/js/src/wasm/WasmValidate.cpp b/js/src/wasm/WasmValidate.cpp
index e964c11d04..98a1423a41 100644
--- a/js/src/wasm/WasmValidate.cpp
+++ b/js/src/wasm/WasmValidate.cpp
@@ -89,14 +89,19 @@ bool wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals) {
return true;
}
-bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
- const FeatureArgs& features,
- ValTypeVector* locals) {
+bool wasm::DecodeLocalEntriesWithParams(Decoder& d,
+ const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ ValTypeVector* locals) {
uint32_t numLocalEntries;
if (!d.readVarU32(&numLocalEntries)) {
return d.fail("failed to read number of local entries");
}
+ if (!locals->appendAll(env.funcs[funcIndex].type->args())) {
+ return false;
+ }
+
for (uint32_t i = 0; i < numLocalEntries; i++) {
uint32_t count;
if (!d.readVarU32(&count)) {
@@ -108,7 +113,7 @@ bool wasm::DecodeLocalEntries(Decoder& d, const TypeContext& types,
}
ValType type;
- if (!d.readValType(types, features, &type)) {
+ if (!d.readValType(*env.types, env.features, &type)) {
return false;
}
@@ -235,9 +240,9 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
&unusedArgs));
}
#endif
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::CallRef): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
const FuncType* unusedType;
@@ -246,7 +251,7 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
}
# ifdef ENABLE_WASM_TAIL_CALLS
case uint16_t(Op::ReturnCallRef): {
- if (!env.functionReferencesEnabled() || !env.tailCallsEnabled()) {
+ if (!env.gcEnabled() || !env.tailCallsEnabled()) {
return iter.unrecognizedOpcode(&op);
}
const FuncType* unusedType;
@@ -1240,15 +1245,15 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
}
break;
}
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
case uint16_t(Op::RefAsNonNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
CHECK(iter.readRefAsNonNull(&nothing));
}
case uint16_t(Op::BrOnNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedDepth;
@@ -1256,7 +1261,7 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
iter.readBrOnNull(&unusedDepth, &unusedType, &nothings, &nothing));
}
case uint16_t(Op::BrOnNonNull): {
- if (!env.functionReferencesEnabled()) {
+ if (!env.gcEnabled()) {
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedDepth;
@@ -1285,31 +1290,19 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
CHECK(iter.readRefIsNull(&nothing));
}
case uint16_t(Op::Try):
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
CHECK(iter.readTry(&unusedType));
case uint16_t(Op::Catch): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
LabelKind unusedKind;
uint32_t unusedIndex;
CHECK(iter.readCatch(&unusedKind, &unusedIndex, &unusedType,
&unusedType, &nothings));
}
case uint16_t(Op::CatchAll): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
LabelKind unusedKind;
CHECK(iter.readCatchAll(&unusedKind, &unusedType, &unusedType,
&nothings));
}
case uint16_t(Op::Delegate): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedDepth;
if (!iter.readDelegate(&unusedDepth, &unusedType, &nothings)) {
return false;
@@ -1318,16 +1311,10 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
break;
}
case uint16_t(Op::Throw): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedIndex;
CHECK(iter.readThrow(&unusedIndex, &nothings));
}
case uint16_t(Op::Rethrow): {
- if (!env.exceptionsEnabled()) {
- return iter.unrecognizedOpcode(&op);
- }
uint32_t unusedDepth;
CHECK(iter.readRethrow(&unusedDepth));
}
@@ -1541,14 +1528,10 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
uint32_t funcIndex, uint32_t bodySize,
Decoder& d) {
- ValTypeVector locals;
- if (!locals.appendAll(env.funcs[funcIndex].type->args())) {
- return false;
- }
-
const uint8_t* bodyBegin = d.currentPosition();
- if (!DecodeLocalEntries(d, *env.types, env.features, &locals)) {
+ ValTypeVector locals;
+ if (!DecodeLocalEntriesWithParams(d, env, funcIndex, &locals)) {
return false;
}
@@ -1624,7 +1607,7 @@ static bool DecodeFuncType(Decoder& d, ModuleEnvironment* env,
static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
StructType* structType) {
if (!env->gcEnabled()) {
- return d.fail("Structure types not enabled");
+ return d.fail("gc not enabled");
}
uint32_t numFields;
@@ -1668,7 +1651,7 @@ static bool DecodeStructType(Decoder& d, ModuleEnvironment* env,
static bool DecodeArrayType(Decoder& d, ModuleEnvironment* env,
ArrayType* arrayType) {
if (!env->gcEnabled()) {
- return d.fail("gc types not enabled");
+ return d.fail("gc not enabled");
}
StorageType elementType;
@@ -2247,13 +2230,6 @@ static bool CheckImportsAgainstBuiltinModules(Decoder& d,
return true;
}
- // Allocate a type context for builtin types so we can canonicalize them
- // and use them in type comparisons
- RefPtr<TypeContext> builtinTypes = js_new<TypeContext>();
- if (!builtinTypes) {
- return false;
- }
-
uint32_t importFuncIndex = 0;
for (auto& import : env->imports) {
Maybe<BuiltinModuleId> builtinModule =
@@ -2278,21 +2254,9 @@ static bool CheckImportsAgainstBuiltinModules(Decoder& d,
return d.fail("unrecognized builtin module field");
}
- // Get a canonicalized type definition for this builtin so we can
- // accurately compare it against the import type.
- FuncType builtinFuncType;
- if (!(*builtinFunc)->funcType(&builtinFuncType)) {
- return false;
- }
- if (!builtinTypes->addType(builtinFuncType)) {
- return false;
- }
- const TypeDef& builtinTypeDef =
- builtinTypes->type(builtinTypes->length() - 1);
-
const TypeDef& importTypeDef = (*env->types)[func.typeIndex];
- if (!TypeDef::isSubTypeOf(&builtinTypeDef, &importTypeDef)) {
- return d.failf("type mismatch in %s", (*builtinFunc)->exportName);
+ if (!TypeDef::isSubTypeOf((*builtinFunc)->typeDef(), &importTypeDef)) {
+ return d.failf("type mismatch in %s", (*builtinFunc)->exportName());
}
break;
}
@@ -2479,10 +2443,6 @@ static bool DecodeTagSection(Decoder& d, ModuleEnvironment* env) {
return true;
}
- if (!env->exceptionsEnabled()) {
- return d.fail("exceptions not enabled");
- }
-
uint32_t numDefs;
if (!d.readVarU32(&numDefs)) {
return d.fail("expected number of tags");
diff --git a/js/src/wasm/WasmValidate.h b/js/src/wasm/WasmValidate.h
index 3254e7b74a..8ba08fd088 100644
--- a/js/src/wasm/WasmValidate.h
+++ b/js/src/wasm/WasmValidate.h
@@ -285,11 +285,13 @@ using ValidatingOpIter = OpIter<ValidatingPolicy>;
Decoder& d,
ValTypeVector* locals);
-// This validates the entries.
+// This validates the entries. The function's parameter types are inserted
+// ahead of the locals, producing the full list of locals used in validation.
-[[nodiscard]] bool DecodeLocalEntries(Decoder& d, const TypeContext& types,
- const FeatureArgs& features,
- ValTypeVector* locals);
+[[nodiscard]] bool DecodeLocalEntriesWithParams(Decoder& d,
+ const ModuleEnvironment& env,
+ uint32_t funcIndex,
+ ValTypeVector* locals);
// Returns whether the given [begin, end) prefix of a module's bytecode starts a
// code section and, if so, returns the SectionRange of that code section.
diff --git a/js/src/wasm/WasmValue.cpp b/js/src/wasm/WasmValue.cpp
index 3798f6c3e8..6039b00517 100644
--- a/js/src/wasm/WasmValue.cpp
+++ b/js/src/wasm/WasmValue.cpp
@@ -642,7 +642,7 @@ bool wasm::ToWebAssemblyValue(JSContext* cx, HandleValue val, ValType type,
case ValType::V128:
break;
case ValType::Ref:
-#ifdef ENABLE_WASM_FUNCTION_REFERENCES
+#ifdef ENABLE_WASM_GC
if (!type.isNullable() && val.isNull()) {
JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
JSMSG_WASM_BAD_REF_NONNULLABLE_VALUE);
diff --git a/js/src/wasm/WasmValue.h b/js/src/wasm/WasmValue.h
index 66de690537..79e20285b9 100644
--- a/js/src/wasm/WasmValue.h
+++ b/js/src/wasm/WasmValue.h
@@ -224,6 +224,11 @@ class LitVal {
Cell& cell() { return cell_; }
const Cell& cell() const { return cell_; }
+ // Updates the type of the LitVal. Does not check that the type is valid
+ // for the actual value, so callers must first establish that the type is
+ // correct, e.g. via validation.
+ void unsafeSetType(ValType type) { type_ = type; }
+
uint32_t i32() const {
MOZ_ASSERT(type_ == ValType::I32);
return cell_.i32_;
@@ -309,11 +314,6 @@ class MOZ_NON_PARAM Val : public LitVal {
return cell_.ref_;
}
- // Updates the type of the Val. Does not check that the type is valid for the
- // actual value, so make sure the type is definitely correct via validation or
- // something.
- void unsafeSetType(ValType type) { type_ = type; }
-
// Initialize from `loc` which is a rooted location and needs no barriers.
void initFromRootedLocation(ValType type, const void* loc);
void initFromHeapLocation(ValType type, const void* loc);
diff --git a/js/src/wasm/moz.build b/js/src/wasm/moz.build
index 8aa23e3516..83fea3b81b 100644
--- a/js/src/wasm/moz.build
+++ b/js/src/wasm/moz.build
@@ -45,6 +45,7 @@ UNIFIED_SOURCES += [
"WasmRealm.cpp",
"WasmSerialize.cpp",
"WasmSignalHandlers.cpp",
+ "WasmStaticTypeDefs.cpp",
"WasmStubs.cpp",
"WasmSummarizeInsn.cpp",
"WasmTable.cpp",