author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:35:37 +0000
commit     a90a5cba08fdf6c0ceb95101c275108a152a3aed (patch)
tree       532507288f3defd7f4dcf1af49698bcb76034855 /js/src/vm
parent     Adding debian version 126.0.1-1. (diff)
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/vm')
-rw-r--r--  js/src/vm/ArrayBufferObject.cpp | 12
-rw-r--r--  js/src/vm/ArrayBufferObject.h | 45
-rw-r--r--  js/src/vm/BytecodeFormatFlags.h | 5
-rw-r--r--  js/src/vm/BytecodeLocation-inl.h | 5
-rw-r--r--  js/src/vm/BytecodeLocation.h | 9
-rw-r--r--  js/src/vm/BytecodeUtil.cpp | 13
-rw-r--r--  js/src/vm/BytecodeUtil.h | 16
-rw-r--r--  js/src/vm/Caches.h | 13
-rw-r--r--  js/src/vm/CharacterEncoding.cpp | 1
-rw-r--r--  js/src/vm/CommonPropertyNames.h | 4
-rw-r--r--  js/src/vm/Compartment.cpp | 10
-rw-r--r--  js/src/vm/EnvironmentObject.cpp | 22
-rw-r--r--  js/src/vm/Float16.h | 150
-rw-r--r--  js/src/vm/FunctionFlags.h | 28
-rw-r--r--  js/src/vm/GeckoProfiler.cpp | 9
-rw-r--r--  js/src/vm/GlobalObject.cpp | 16
-rw-r--r--  js/src/vm/HelperThreadState.h | 6
-rw-r--r--  js/src/vm/HelperThreads.cpp | 45
-rw-r--r--  js/src/vm/HelperThreads.h | 42
-rw-r--r--  js/src/vm/InternalThreadPool.cpp | 9
-rw-r--r--  js/src/vm/Interpreter.cpp | 21
-rw-r--r--  js/src/vm/JSAtomUtils.cpp | 9
-rw-r--r--  js/src/vm/JSContext.cpp | 13
-rw-r--r--  js/src/vm/JSContext.h | 10
-rw-r--r--  js/src/vm/JSONParser.cpp | 27
-rw-r--r--  js/src/vm/JSObject.cpp | 22
-rw-r--r--  js/src/vm/JSScript.h | 17
-rw-r--r--  js/src/vm/JitActivation.cpp | 34
-rw-r--r--  js/src/vm/JitActivation.h | 7
-rw-r--r--  js/src/vm/ModuleBuilder.h | 8
-rw-r--r--  js/src/vm/Modules.cpp | 41
-rw-r--r--  js/src/vm/Monitor.h | 15
-rw-r--r--  js/src/vm/Opcodes.h | 137
-rw-r--r--  js/src/vm/PortableBaselineInterpret.cpp | 33
-rw-r--r--  js/src/vm/PropMap.cpp | 18
-rw-r--r--  js/src/vm/PropMap.h | 2
-rw-r--r--  js/src/vm/SelfHosting.cpp | 11
-rw-r--r--  js/src/vm/ShapeZone.cpp | 127
-rw-r--r--  js/src/vm/ShapeZone.h | 2
-rw-r--r--  js/src/vm/SharedArrayObject.cpp | 26
-rw-r--r--  js/src/vm/SharedArrayObject.h | 7
-rw-r--r--  js/src/vm/StringType-inl.h | 1
-rw-r--r--  js/src/vm/StringType.cpp | 128
-rw-r--r--  js/src/vm/StringType.h | 148
-rw-r--r--  js/src/vm/TypedArrayObject-inl.h | 99
-rw-r--r--  js/src/vm/TypedArrayObject.cpp | 334
-rw-r--r--  js/src/vm/TypedArrayObject.h | 21
-rw-r--r--  js/src/vm/TypeofEqOperand.h | 50
-rw-r--r--  js/src/vm/Uint8Clamped.h | 6
49 files changed, 1438 insertions(+), 396 deletions(-)
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
index 14039af574..1610a59ae9 100644
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -207,6 +207,7 @@ void* js::MapBufferMemory(wasm::IndexType t, size_t mappedSize,
void* data = nullptr;
if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) {
MOZ_ASSERT(err == ENOMEM);
+ (void)err;
return nullptr;
}
MOZ_ASSERT(data);
@@ -2578,8 +2579,15 @@ size_t ArrayBufferObject::objectMoved(JSObject* obj, JSObject* old) {
auto& dst = obj->as<ArrayBufferType>();
const auto& src = old->as<ArrayBufferType>();
- MOZ_ASSERT(
- !obj->runtimeFromMainThread()->gc.nursery().isInside(src.dataPointer()));
+#ifdef DEBUG
+ // Check that the data pointer is not inside the nursery, taking into account
+ // that inline data pointers for zero-length buffers can point to the end of a
+ // chunk, which can abut the start of the nursery.
+ if (src.byteLength() != 0 || (uintptr_t(src.dataPointer()) & gc::ChunkMask)) {
+ Nursery& nursery = obj->runtimeFromMainThread()->gc.nursery();
+ MOZ_ASSERT(!nursery.isInside(src.dataPointer()));
+ }
+#endif
// Fix up possible inline data pointer.
if (src.hasInlineData()) {
diff --git a/js/src/vm/ArrayBufferObject.h b/js/src/vm/ArrayBufferObject.h
index ce78b26cb2..fef5c7d4d9 100644
--- a/js/src/vm/ArrayBufferObject.h
+++ b/js/src/vm/ArrayBufferObject.h
@@ -202,7 +202,10 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
static const uint8_t RESERVED_SLOTS = 4;
- static const size_t ARRAY_BUFFER_ALIGNMENT = 8;
+ // Alignment for ArrayBuffer objects. Must match the largest possible
+ // TypedArray scalar to ensure TypedArray and Atomics accesses are always
+ // aligned.
+ static constexpr size_t ARRAY_BUFFER_ALIGNMENT = 8;
static_assert(FLAGS_SLOT == JS_ARRAYBUFFER_FLAGS_SLOT,
"self-hosted code with burned-in constants must get the "
@@ -306,6 +309,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
void* freeUserData_;
friend class ArrayBufferObject;
+ friend class ResizableArrayBufferObject;
BufferContents(uint8_t* data, BufferKind kind,
JS::BufferContentsFreeFunc freeFunc = nullptr,
@@ -321,6 +325,43 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
// BufferContents does not outlive the data.
}
+#ifdef DEBUG
+ // Checks if the buffer contents are properly aligned.
+ //
+ // `malloc(0)` is implementation defined and may return a pointer which
+ // isn't aligned to `max_align_t`, so we only require proper alignment when
+ // `byteLength` is non-zero.
+ //
+ // jemalloc doesn't implement this restriction, and instead uses
+ // `sizeof(void*)` for its smallest allocation class. Larger allocations are
+ // guaranteed to be eight-byte aligned.
+ bool isAligned(size_t byteLength) const {
+ // `malloc(0)` has implementation defined behavior.
+ if (byteLength == 0) {
+ return true;
+ }
+
+ // Allow jemalloc tiny allocations to have smaller alignment requirements
+ // than `std::malloc`.
+ if (sizeof(void*) < ArrayBufferObject::ARRAY_BUFFER_ALIGNMENT) {
+ if (byteLength <= sizeof(void*)) {
+ return true;
+ }
+ }
+
+ // `std::malloc` returns memory at least as strictly aligned as for
+ // max_align_t and the alignment of max_align_t is a multiple of the array
+ // buffer alignment.
+ static_assert(alignof(std::max_align_t) %
+ ArrayBufferObject::ARRAY_BUFFER_ALIGNMENT ==
+ 0);
+
+ // Otherwise the memory must be correctly aligned.
+ auto ptr = reinterpret_cast<uintptr_t>(data());
+ return ptr % ArrayBufferObject::ARRAY_BUFFER_ALIGNMENT == 0;
+ }
+#endif
+
public:
static BufferContents createInlineData(void* data) {
return BufferContents(static_cast<uint8_t*>(data), INLINE_DATA);
@@ -590,6 +631,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared {
}
void initialize(size_t byteLength, BufferContents contents) {
+ MOZ_ASSERT(contents.isAligned(byteLength));
setByteLength(byteLength);
setFlags(0);
setFirstView(nullptr);
@@ -670,6 +712,7 @@ class ResizableArrayBufferObject : public ArrayBufferObject {
void initialize(size_t byteLength, size_t maxByteLength,
BufferContents contents) {
+ MOZ_ASSERT(contents.isAligned(byteLength));
setByteLength(byteLength);
setMaxByteLength(maxByteLength);
setFlags(RESIZABLE);
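
The rule the new isAligned() assertion encodes reduces to a few lines. Below is a standalone sketch of that logic for illustration only; the constant name and free function are ours, not the SpiderMonkey API:

#include <cstddef>
#include <cstdint>

// Standalone sketch of the alignment rule asserted above (illustrative).
constexpr std::size_t kArrayBufferAlignment = 8;  // ARRAY_BUFFER_ALIGNMENT

bool BufferIsAligned(const void* data, std::size_t byteLength) {
  // malloc(0) is implementation-defined, so empty buffers are exempt.
  if (byteLength == 0) {
    return true;
  }
  // jemalloc's tiny size classes only guarantee sizeof(void*) alignment.
  if (sizeof(void*) < kArrayBufferAlignment && byteLength <= sizeof(void*)) {
    return true;
  }
  // Everything else must meet the full ArrayBuffer alignment.
  return reinterpret_cast<std::uintptr_t>(data) % kArrayBufferAlignment == 0;
}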
diff --git a/js/src/vm/BytecodeFormatFlags.h b/js/src/vm/BytecodeFormatFlags.h
index 893f0f0823..bca2b0717f 100644
--- a/js/src/vm/BytecodeFormatFlags.h
+++ b/js/src/vm/BytecodeFormatFlags.h
@@ -41,10 +41,7 @@ enum {
JOF_STRING = 26, /* uint32_t constant index */
JOF_TYPEMASK = 0xFF, /* mask for above immediate types */
- JOF_NAME = 1 << 8, /* name operation */
- JOF_PROP = 2 << 8, /* obj.prop operation */
- JOF_ELEM = 3 << 8, /* obj[index] operation */
- JOF_MODEMASK = 0xFF << 8, /* mask for above addressing modes */
+ /* Bits 0xFF00 are available for future usage */
JOF_PROPSET = 1 << 16, /* property/element/name set operation */
JOF_PROPINIT = 1 << 17, /* property/element/name init operation */
diff --git a/js/src/vm/BytecodeLocation-inl.h b/js/src/vm/BytecodeLocation-inl.h
index 46c945ddad..ee5281f16b 100644
--- a/js/src/vm/BytecodeLocation-inl.h
+++ b/js/src/vm/BytecodeLocation-inl.h
@@ -32,6 +32,11 @@ inline JSString* BytecodeLocation::getString(const JSScript* script) const {
return script->getString(this->rawBytecode_);
}
+inline bool BytecodeLocation::atomizeString(JSContext* cx, JSScript* script) {
+ MOZ_ASSERT(this->isValid());
+ return script->atomizeString(cx, this->rawBytecode_);
+}
+
inline PropertyName* BytecodeLocation::getPropertyName(
const JSScript* script) const {
MOZ_ASSERT(this->isValid());
diff --git a/js/src/vm/BytecodeLocation.h b/js/src/vm/BytecodeLocation.h
index 8109b5f2fa..c5bd1d2f5d 100644
--- a/js/src/vm/BytecodeLocation.h
+++ b/js/src/vm/BytecodeLocation.h
@@ -15,6 +15,7 @@
#include "vm/CompletionKind.h" // CompletionKind
#include "vm/FunctionPrefixKind.h" // FunctionPrefixKind
#include "vm/GeneratorResumeKind.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
namespace js {
@@ -103,6 +104,7 @@ class BytecodeLocation {
inline JSAtom* getAtom(const JSScript* script) const;
inline JSString* getString(const JSScript* script) const;
+ inline bool atomizeString(JSContext* cx, JSScript* script);
inline PropertyName* getPropertyName(const JSScript* script) const;
inline JS::BigInt* getBigInt(const JSScript* script) const;
inline JSObject* getObject(const JSScript* script) const;
@@ -198,8 +200,6 @@ class BytecodeLocation {
bool isStrictSetOp() const { return IsStrictSetPC(rawBytecode_); }
- bool isNameOp() const { return IsNameOp(getOp()); }
-
bool isSpreadOp() const { return IsSpreadOp(getOp()); }
bool isInvokeOp() const { return IsInvokeOp(getOp()); }
@@ -279,6 +279,11 @@ class BytecodeLocation {
return index;
}
+ TypeofEqOperand getTypeofEqOperand() const {
+ MOZ_ASSERT(is(JSOp::TypeofEq));
+ return TypeofEqOperand::fromRawValue(GET_UINT8(rawBytecode_));
+ }
+
FunctionPrefixKind getFunctionPrefixKind() const {
MOZ_ASSERT(is(JSOp::SetFunName));
return FunctionPrefixKind(GET_UINT8(rawBytecode_));
diff --git a/js/src/vm/BytecodeUtil.cpp b/js/src/vm/BytecodeUtil.cpp
index cc8f545387..82b21cb508 100644
--- a/js/src/vm/BytecodeUtil.cpp
+++ b/js/src/vm/BytecodeUtil.cpp
@@ -53,7 +53,8 @@
#include "vm/Opcodes.h"
#include "vm/Realm.h"
#include "vm/Shape.h"
-#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/ToSource.h" // js::ValueToSource
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "gc/GC-inl.h"
#include "vm/BytecodeIterator-inl.h"
@@ -1893,6 +1894,16 @@ bool ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex) {
return write("(typeof ") && decompilePCForStackOperand(pc, -1) &&
write(")");
+ case JSOp::TypeofEq: {
+ auto operand = TypeofEqOperand::fromRawValue(GET_UINT8(pc));
+ JSType type = operand.type();
+ JSOp compareOp = operand.compareOp();
+
+ return write("(typeof ") && decompilePCForStackOperand(pc, -1) &&
+ write(compareOp == JSOp::Ne ? " != \"" : " == \"") &&
+ write(JSTypeToString(type)) && write("\")");
+ }
+
case JSOp::InitElemArray:
return write("[...]");
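
The decompiler case above unpacks a single uint8_t immediate into a JSType and a comparison operator. The real encoding lives in the new js/src/vm/TypeofEqOperand.h (see the diffstat); the sketch below shows one plausible packing, with the flag bit an assumption on our part:

#include <cstdint>

// Illustrative packing of a typeof comparison into one byte, in the spirit
// of TypeofEqOperand. The NeFlag bit position is an assumption, not the
// actual layout from js/src/vm/TypeofEqOperand.h.
enum class JSTypeSketch : uint8_t { Undefined, Object, Function, String,
                                    Number, Boolean, Symbol, BigInt };
enum class CompareOpSketch : uint8_t { Eq, Ne };

struct TypeofEqOperandSketch {
  static constexpr uint8_t NeFlag = 0x80;  // hypothetical "!=" flag bit
  uint8_t value;

  static TypeofEqOperandSketch make(JSTypeSketch type, CompareOpSketch op) {
    uint8_t raw = static_cast<uint8_t>(type);
    if (op == CompareOpSketch::Ne) {
      raw |= NeFlag;
    }
    return {raw};
  }
  static TypeofEqOperandSketch fromRawValue(uint8_t raw) { return {raw}; }

  JSTypeSketch type() const {
    return static_cast<JSTypeSketch>(value & ~NeFlag);
  }
  CompareOpSketch compareOp() const {
    return (value & NeFlag) ? CompareOpSketch::Ne : CompareOpSketch::Eq;
  }
};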
diff --git a/js/src/vm/BytecodeUtil.h b/js/src/vm/BytecodeUtil.h
index 09ff4387bb..3f0d4d07e4 100644
--- a/js/src/vm/BytecodeUtil.h
+++ b/js/src/vm/BytecodeUtil.h
@@ -39,10 +39,6 @@ class JS_PUBLIC_API StringPrinter;
static inline uint32_t JOF_TYPE(uint32_t fmt) { return fmt & JOF_TYPEMASK; }
-/* Shorthand for mode from format. */
-
-static inline uint32_t JOF_MODE(uint32_t fmt) { return fmt & JOF_MODEMASK; }
-
/*
* Immediate operand getters, setters, and bounds.
*/
@@ -462,8 +458,6 @@ inline bool IsCheckStrictOp(JSOp op) {
return CodeSpec(op).format & JOF_CHECKSTRICT;
}
-inline bool IsNameOp(JSOp op) { return CodeSpec(op).format & JOF_NAME; }
-
#ifdef DEBUG
inline bool IsCheckSloppyOp(JSOp op) {
return CodeSpec(op).format & JOF_CHECKSLOPPY;
@@ -510,10 +504,6 @@ inline bool IsSetElemOp(JSOp op) {
inline bool IsSetElemPC(const jsbytecode* pc) { return IsSetElemOp(JSOp(*pc)); }
-inline bool IsElemPC(const jsbytecode* pc) {
- return CodeSpec(JSOp(*pc)).format & JOF_ELEM;
-}
-
inline bool IsInvokeOp(JSOp op) { return CodeSpec(op).format & JOF_INVOKE; }
inline bool IsInvokePC(jsbytecode* pc) { return IsInvokeOp(JSOp(*pc)); }
@@ -534,6 +524,12 @@ inline bool IsSpreadOp(JSOp op) { return CodeSpec(op).format & JOF_SPREAD; }
inline bool IsSpreadPC(const jsbytecode* pc) { return IsSpreadOp(JSOp(*pc)); }
+// Returns true if the specified opcode is for `typeof name` where `name` is
+// a single identifier.
+inline bool IsTypeOfNameOp(JSOp op) {
+ return op == JSOp::Typeof || op == JSOp::TypeofEq;
+}
+
inline bool OpUsesEnvironmentChain(JSOp op) {
return CodeSpec(op).format & JOF_USES_ENV;
}
diff --git a/js/src/vm/Caches.h b/js/src/vm/Caches.h
index dcd0c78822..82979c1283 100644
--- a/js/src/vm/Caches.h
+++ b/js/src/vm/Caches.h
@@ -43,10 +43,15 @@ struct EvalCacheEntry {
};
struct EvalCacheLookup {
- explicit EvalCacheLookup(JSContext* cx) : str(cx), callerScript(cx) {}
- Rooted<JSLinearString*> str;
- RootedScript callerScript;
- MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc;
+ JSLinearString* str = nullptr;
+ JSScript* callerScript = nullptr;
+ MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc = nullptr;
+
+ EvalCacheLookup() = default;
+ EvalCacheLookup(JSLinearString* str, JSScript* callerScript, jsbytecode* pc)
+ : str(str), callerScript(callerScript), pc(pc) {}
+
+ void trace(JSTracer* trc);
};
struct EvalCacheHashPolicy {
diff --git a/js/src/vm/CharacterEncoding.cpp b/js/src/vm/CharacterEncoding.cpp
index 3d05275e2d..8911612a7a 100644
--- a/js/src/vm/CharacterEncoding.cpp
+++ b/js/src/vm/CharacterEncoding.cpp
@@ -286,7 +286,6 @@ static bool InflateUTF8ToUTF16(JSContext* cx, const UTF8Chars& src,
break;
}
} else {
-
#define INVALID(report, arg, n2) \
do { \
if (ErrorAction == OnUTF8Error::Throw) { \
diff --git a/js/src/vm/CommonPropertyNames.h b/js/src/vm/CommonPropertyNames.h
index 5fa3f2b633..a8e874d410 100644
--- a/js/src/vm/CommonPropertyNames.h
+++ b/js/src/vm/CommonPropertyNames.h
@@ -185,6 +185,7 @@
MACRO_(exponentSeparator, "exponentSeparator") \
MACRO_(export_, "export") \
MACRO_(extends, "extends") \
+ MACRO_(f16round, "f16round") \
MACRO_(false_, "false") \
MACRO_(few, "few") \
IF_DECORATORS(MACRO_(field, "field")) \
@@ -233,6 +234,7 @@
MACRO_(GetBuiltinConstructor, "GetBuiltinConstructor") \
MACRO_(GetBuiltinPrototype, "GetBuiltinPrototype") \
MACRO_(GetBuiltinSymbol, "GetBuiltinSymbol") \
+ MACRO_(getFloat16, "getFloat16") \
MACRO_(GetInternalError, "GetInternalError") \
MACRO_(getInternals, "getInternals") \
MACRO_(GetIterator, "GetIterator") \
@@ -335,6 +337,7 @@
MACRO_(iterate, "iterate") \
MACRO_(join, "join") \
MACRO2(js, "js") \
+ MACRO_(jsTag, "JSTag") \
MACRO_(jsStringModule, "js-string") \
MACRO_(json, "json") \
MACRO_(keys, "keys") \
@@ -508,6 +511,7 @@
MACRO_(setBigUint64, "setBigUint64") \
MACRO_(SetCanonicalName, "SetCanonicalName") \
MACRO_(SetConstructorInit, "SetConstructorInit") \
+ MACRO_(setFloat16, "setFloat16") \
MACRO_(SetIsInlinableLargeFunction, "SetIsInlinableLargeFunction") \
MACRO_(Set_Iterator_, "Set Iterator") \
MACRO_(setFromBase64, "setFromBase64") \
diff --git a/js/src/vm/Compartment.cpp b/js/src/vm/Compartment.cpp
index 4efb92366b..749342b167 100644
--- a/js/src/vm/Compartment.cpp
+++ b/js/src/vm/Compartment.cpp
@@ -52,14 +52,10 @@ Compartment::Compartment(Zone* zone, bool invisibleToDebugger)
void Compartment::checkObjectWrappersAfterMovingGC() {
for (ObjectWrapperEnum e(this); !e.empty(); e.popFront()) {
- // Assert that the postbarriers have worked and that nothing is left in the
- // wrapper map that points into the nursery, and that the hash table entries
- // are discoverable.
auto key = e.front().key();
- CheckGCThingAfterMovingGC(key.get());
-
- auto ptr = crossCompartmentObjectWrappers.lookup(key);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &e.front());
+ CheckGCThingAfterMovingGC(key.get()); // Keys may be in a different zone.
+ CheckGCThingAfterMovingGC(e.front().value().unbarrieredGet(), zone());
+ CheckTableEntryAfterMovingGC(crossCompartmentObjectWrappers, e, key);
}
}
diff --git a/js/src/vm/EnvironmentObject.cpp b/js/src/vm/EnvironmentObject.cpp
index 008cfca260..0d5f214f53 100644
--- a/js/src/vm/EnvironmentObject.cpp
+++ b/js/src/vm/EnvironmentObject.cpp
@@ -2613,18 +2613,20 @@ void DebugEnvironments::checkHashTablesAfterMovingGC() {
* This is called at the end of StoreBuffer::mark() to check that our
* postbarriers have worked and that no hashtable keys (or values) are left
* pointing into the nursery.
+ *
+ * |proxiedEnvs| is checked automatically because it is a WeakMap.
*/
- proxiedEnvs.checkAfterMovingGC();
- for (MissingEnvironmentMap::Range r = missingEnvs.all(); !r.empty();
- r.popFront()) {
- CheckGCThingAfterMovingGC(r.front().key().scope());
+ CheckTableAfterMovingGC(missingEnvs, [this](const auto& entry) {
+ CheckGCThingAfterMovingGC(entry.key().scope(), zone());
// Use unbarrieredGet() to prevent triggering read barrier while collecting.
- CheckGCThingAfterMovingGC(r.front().value().unbarrieredGet());
- }
- for (LiveEnvironmentMap::Range r = liveEnvs.all(); !r.empty(); r.popFront()) {
- CheckGCThingAfterMovingGC(r.front().key());
- CheckGCThingAfterMovingGC(r.front().value().scope_.get());
- }
+ CheckGCThingAfterMovingGC(entry.value().unbarrieredGet(), zone());
+ return entry.key();
+ });
+ CheckTableAfterMovingGC(liveEnvs, [this](const auto& entry) {
+ CheckGCThingAfterMovingGC(entry.key(), zone());
+ CheckGCThingAfterMovingGC(entry.value().scope_.get(), zone());
+ return entry.key().unbarrieredGet();
+ });
}
#endif
diff --git a/js/src/vm/Float16.h b/js/src/vm/Float16.h
new file mode 100644
index 0000000000..e9c3268af4
--- /dev/null
+++ b/js/src/vm/Float16.h
@@ -0,0 +1,150 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_Float16_h
+#define vm_Float16_h
+
+#include <cstdint>
+#include <cstring>
+#include <limits>
+
+namespace js {
+
+namespace half {
+// This is extracted from Version 2.2.0 of the half library by Christian Rau.
+// See https://sourceforge.net/projects/half/.
+// The original copyright and MIT license are reproduced below:
+
+// half - IEEE 754-based half-precision floating-point library.
+//
+// Copyright (c) 2012-2021 Christian Rau <rauy@users.sourceforge.net>
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+// SOFTWARE.
+
+/// Type traits for floating-point bits.
+template <typename T>
+struct bits {
+ typedef unsigned char type;
+};
+template <typename T>
+struct bits<const T> : bits<T> {};
+template <typename T>
+struct bits<volatile T> : bits<T> {};
+template <typename T>
+struct bits<const volatile T> : bits<T> {};
+
+/// Unsigned integer of (at least) 64 bits width.
+template <>
+struct bits<double> {
+ typedef std::uint_least64_t type;
+};
+
+/// Fastest unsigned integer of (at least) 32 bits width.
+typedef std::uint_fast32_t uint32;
+
+/// Half-precision overflow.
+/// \param sign half-precision value with sign bit only
+/// \return rounded overflowing half-precision value
+constexpr unsigned int overflow(unsigned int sign = 0) { return sign | 0x7C00; }
+
+/// Half-precision underflow.
+/// \param sign half-precision value with sign bit only
+/// \return rounded underflowing half-precision value
+constexpr unsigned int underflow(unsigned int sign = 0) { return sign; }
+
+/// Round half-precision number.
+/// \param value finite half-precision number to round
+/// \param g guard bit (most significant discarded bit)
+/// \param s sticky bit (or of all but the most significant discarded bits)
+/// \return rounded half-precision value
+constexpr unsigned int rounded(unsigned int value, int g, int s) {
+ return value + (g & (s | value));
+}
+
+/// Convert IEEE double-precision to half-precision.
+/// \param value double-precision value to convert
+/// \return rounded half-precision value
+inline unsigned int float2half_impl(double value) {
+ bits<double>::type dbits;
+ std::memcpy(&dbits, &value, sizeof(double));
+ uint32 hi = dbits >> 32, lo = dbits & 0xFFFFFFFF;
+ unsigned int sign = (hi >> 16) & 0x8000;
+ hi &= 0x7FFFFFFF;
+ if (hi >= 0x7FF00000)
+ return sign | 0x7C00 |
+ ((dbits & 0xFFFFFFFFFFFFF) ? (0x200 | ((hi >> 10) & 0x3FF)) : 0);
+ if (hi >= 0x40F00000) return overflow(sign);
+ if (hi >= 0x3F100000)
+ return rounded(sign | (((hi >> 20) - 1008) << 10) | ((hi >> 10) & 0x3FF),
+ (hi >> 9) & 1, ((hi & 0x1FF) | lo) != 0);
+ if (hi >= 0x3E600000) {
+ int i = 1018 - (hi >> 20);
+ hi = (hi & 0xFFFFF) | 0x100000;
+ return rounded(sign | (hi >> (i + 1)), (hi >> i) & 1,
+ ((hi & ((static_cast<uint32>(1) << i) - 1)) | lo) != 0);
+ }
+ if ((hi | lo) != 0) return underflow(sign);
+ return sign;
+}
+
+/// Convert half-precision to IEEE double-precision.
+/// \param value half-precision value to convert
+/// \return double-precision value
+inline double half2float_impl(unsigned int value) {
+ uint32 hi = static_cast<uint32>(value & 0x8000) << 16;
+ unsigned int abs = value & 0x7FFF;
+ if (abs) {
+ hi |= 0x3F000000 << static_cast<unsigned>(abs >= 0x7C00);
+ for (; abs < 0x400; abs <<= 1, hi -= 0x100000)
+ ;
+ hi += static_cast<uint32>(abs) << 10;
+ }
+ bits<double>::type dbits = static_cast<bits<double>::type>(hi) << 32;
+ double out;
+ std::memcpy(&out, &dbits, sizeof(double));
+ return out;
+}
+} // namespace half
+
+struct float16 {
+ uint16_t val;
+
+ float16() = default;
+ float16(const float16& other) = default;
+
+ explicit float16(double x) { *this = x; }
+
+ float16& operator=(const float16& x) = default;
+
+ float16& operator=(double x) {
+ this->val = half::float2half_impl(x);
+ return *this;
+ }
+
+ double toDouble() { return half::half2float_impl(this->val); }
+};
+
+static_assert(sizeof(float16) == 2, "float16 has no extra padding");
+
+} // namespace js
+
+#endif // vm_Float16_h
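
Assuming the new header is importable as vm/Float16.h, a quick round-trip check of the conversion routines might look like this sketch (not part of the patch):

#include <cassert>
#include <cmath>

#include "vm/Float16.h"  // the new header added above (path assumed)

int main() {
  js::float16 one(1.0);                // exactly representable: round-trips
  assert(one.toDouble() == 1.0);

  js::float16 big(65520.0);            // at the overflow rounding boundary
  assert(std::isinf(big.toDouble()));  // rounds up to +infinity

  js::float16 tiny(1e-8);              // below the subnormal range (< 2^-25)
  assert(tiny.toDouble() == 0.0);      // flushes to zero via underflow()
  return 0;
}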
diff --git a/js/src/vm/FunctionFlags.h b/js/src/vm/FunctionFlags.h
index 27d51c214a..558c71a6b7 100644
--- a/js/src/vm/FunctionFlags.h
+++ b/js/src/vm/FunctionFlags.h
@@ -94,6 +94,11 @@ class FunctionFlags {
BASESCRIPT = 1 << 5,
SELFHOSTLAZY = 1 << 6,
+ // This Native function has a JIT entry which emulates the
+ // js::BaseScript::jitCodeRaw mechanism. Used for Wasm functions and
+ // TrampolineNative builtins.
+ NATIVE_JIT_ENTRY = 1 << 7,
+
// Function may be called as a constructor. This corresponds in the spec as
// having a [[Construct]] internal method.
//
@@ -103,12 +108,7 @@ class FunctionFlags {
// This flag is used both by scripted functions and native functions.
//
// WARNING: This is independent from FunctionKind::ClassConstructor.
- CONSTRUCTOR = 1 << 7,
-
- // Function is either getter or setter, with "get " or "set " prefix,
- // but JSFunction::AtomSlot contains unprefixed name, and the function name
- // is lazily constructed on the first access.
- LAZY_ACCESSOR_NAME = 1 << 8,
+ CONSTRUCTOR = 1 << 8,
// Function comes from a FunctionExpression, ArrowFunction, or Function()
// call (not a FunctionDeclaration).
@@ -116,10 +116,10 @@ class FunctionFlags {
// This flag is used only by scripted functions and AsmJS.
LAMBDA = 1 << 9,
- // This Native function has a JIT entry which emulates the
- // js::BaseScript::jitCodeRaw mechanism. Used for Wasm functions and
- // TrampolineNative builtins.
- NATIVE_JIT_ENTRY = 1 << 10,
+ // Function is either getter or setter, with "get " or "set " prefix,
+ // but JSFunction::AtomSlot contains unprefixed name, and the function name
+ // is lazily constructed on the first access.
+ LAZY_ACCESSOR_NAME = 1 << 10,
// Function had no explicit name, but a name was set by SetFunctionName at
// compile time or SetFunctionName at runtime.
@@ -436,12 +436,8 @@ class FunctionFlags {
FunctionFlags& setIsGhost() { return setFlags(GHOST_FUNCTION); }
bool isGhost() const { return hasFlags(GHOST_FUNCTION); }
- static uint16_t HasJitEntryFlags(bool isConstructing) {
- uint16_t flags = BASESCRIPT | SELFHOSTLAZY;
- if (!isConstructing) {
- flags |= NATIVE_JIT_ENTRY;
- }
- return flags;
+ static constexpr uint16_t HasJitEntryFlags() {
+ return BASESCRIPT | SELFHOSTLAZY | NATIVE_JIT_ENTRY;
}
static FunctionFlags clearMutableflags(FunctionFlags flags) {
diff --git a/js/src/vm/GeckoProfiler.cpp b/js/src/vm/GeckoProfiler.cpp
index dbf1eb9081..91f03b3432 100644
--- a/js/src/vm/GeckoProfiler.cpp
+++ b/js/src/vm/GeckoProfiler.cpp
@@ -371,12 +371,11 @@ void GeckoProfilerRuntime::fixupStringsMapAfterMovingGC() {
#ifdef JSGC_HASH_TABLE_CHECKS
void GeckoProfilerRuntime::checkStringsMapAfterMovingGC() {
- for (auto r = strings().all(); !r.empty(); r.popFront()) {
- BaseScript* script = r.front().key();
+ CheckTableAfterMovingGC(strings(), [](const auto& entry) {
+ BaseScript* script = entry.key();
CheckGCThingAfterMovingGC(script);
- auto ptr = strings().lookup(script);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
+ return script;
+ });
}
#endif
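
The removed loop shows exactly what the new CheckTableAfterMovingGC helper factors out: iterate the table, check each entry's GC things, then re-look-up the key to prove the entry is still discoverable. A hedged guess at the helper's shape (the name and signature of this sketch are assumptions):

#include <cassert>

// Sketch of the shared helper these call sites now use (hypothetical; the
// real CheckTableAfterMovingGC lives in the GC checking code).
template <typename Table, typename CheckEntry>
void CheckTableAfterMovingGCSketch(Table& table, CheckEntry checkEntry) {
  for (auto r = table.all(); !r.empty(); r.popFront()) {
    // The callback validates the entry's GC things and returns the key to
    // look up again, proving the entry is still discoverable post-move.
    auto key = checkEntry(r.front());
    auto ptr = table.lookup(key);
    assert(ptr.found() && &*ptr == &r.front());
  }
}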
diff --git a/js/src/vm/GlobalObject.cpp b/js/src/vm/GlobalObject.cpp
index 6782433fd3..8bdfd3ee75 100644
--- a/js/src/vm/GlobalObject.cpp
+++ b/js/src/vm/GlobalObject.cpp
@@ -186,6 +186,9 @@ bool GlobalObject::skipDeselectedConstructor(JSContext* cx, JSProtoKey key) {
#ifdef ENABLE_WASM_TYPE_REFLECTIONS
case JSProto_WasmFunction:
#endif
+#ifdef ENABLE_WASM_JSPI
+ case JSProto_WasmSuspending:
+#endif
case JSProto_WasmException:
return false;
@@ -241,6 +244,11 @@ bool GlobalObject::skipDeselectedConstructor(JSContext* cx, JSProtoKey key) {
case JSProto_ShadowRealm:
return !JS::Prefs::experimental_shadow_realms();
+#ifdef NIGHTLY_BUILD
+ case JSProto_Float16Array:
+ return !JS::Prefs::experimental_float16array();
+#endif
+
default:
MOZ_CRASH("unexpected JSProtoKey");
}
@@ -920,6 +928,10 @@ bool GlobalObject::getSelfHostedFunction(JSContext* cx,
return true;
}
+ // Don't collect metadata for self-hosted functions or intrinsics.
+ // This is similar to the suppression in GlobalObject::resolveConstructor.
+ AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
+
JSRuntime* runtime = cx->runtime();
frontend::ScriptIndex index =
runtime->getSelfHostedScriptIndexRange(selfHostedName)->start;
@@ -940,6 +952,10 @@ bool GlobalObject::getIntrinsicValueSlow(JSContext* cx,
Handle<GlobalObject*> global,
Handle<PropertyName*> name,
MutableHandleValue value) {
+ // Don't collect metadata for self-hosted functions or intrinsics.
+ // This is similar to the suppression in GlobalObject::resolveConstructor.
+ AutoSuppressAllocationMetadataBuilder suppressMetadata(cx);
+
// If this is a C++ intrinsic, simply define the function on the intrinsics
// holder.
if (const JSFunctionSpec* spec = js::FindIntrinsicSpec(name)) {
diff --git a/js/src/vm/HelperThreadState.h b/js/src/vm/HelperThreadState.h
index a43efef7af..8bd8b797b4 100644
--- a/js/src/vm/HelperThreadState.h
+++ b/js/src/vm/HelperThreadState.h
@@ -81,9 +81,6 @@ typedef Vector<Tier2GeneratorTask*, 0, SystemAllocPolicy>
// Per-process state for off thread work items.
class GlobalHelperThreadState {
- friend class AutoLockHelperThreadState;
- friend class AutoUnlockHelperThreadState;
-
public:
// A single tier-2 ModuleGenerator job spawns many compilation jobs, and we
// do not want to allow more than one such ModuleGenerator to run at a time.
@@ -182,6 +179,7 @@ class GlobalHelperThreadState {
// JS::SetHelperThreadTaskCallback. If this is not set the internal thread
// pool is used.
JS::HelperThreadTaskCallback dispatchTaskCallback = nullptr;
+ friend class AutoHelperTaskQueue;
// The number of tasks dispatched to the thread pool that have not started
// running yet.
@@ -228,7 +226,7 @@ class GlobalHelperThreadState {
void assertIsLockedByCurrentThread() const;
#endif
- void wait(AutoLockHelperThreadState& locked,
+ void wait(AutoLockHelperThreadState& lock,
mozilla::TimeDuration timeout = mozilla::TimeDuration::Forever());
void notifyAll(const AutoLockHelperThreadState&);
diff --git a/js/src/vm/HelperThreads.cpp b/js/src/vm/HelperThreads.cpp
index da8231c1dc..d50c9a134a 100644
--- a/js/src/vm/HelperThreads.cpp
+++ b/js/src/vm/HelperThreads.cpp
@@ -889,25 +889,23 @@ void GlobalHelperThreadState::assertIsLockedByCurrentThread() const {
}
#endif // DEBUG
-void GlobalHelperThreadState::dispatch(
- DispatchReason reason, const AutoLockHelperThreadState& locked) {
- if (canStartTasks(locked) && tasksPending_ < threadCount) {
+void GlobalHelperThreadState::dispatch(DispatchReason reason,
+ const AutoLockHelperThreadState& lock) {
+ if (canStartTasks(lock) && tasksPending_ < threadCount) {
// This doesn't guarantee that we don't dispatch more tasks to the external
// pool than necessary if tasks are taking a long time to start, but it does
// limit the number.
tasksPending_++;
- // The hazard analysis can't tell that the callback doesn't GC.
- JS::AutoSuppressGCAnalysis nogc;
-
- dispatchTaskCallback(reason);
+ lock.queueTaskToDispatch(reason);
}
}
void GlobalHelperThreadState::wait(
- AutoLockHelperThreadState& locked,
+ AutoLockHelperThreadState& lock,
TimeDuration timeout /* = TimeDuration::Forever() */) {
- consumerWakeup.wait_for(locked, timeout);
+ MOZ_ASSERT(!lock.hasQueuedTasks());
+ consumerWakeup.wait_for(lock, timeout);
}
void GlobalHelperThreadState::notifyAll(const AutoLockHelperThreadState&) {
@@ -1534,6 +1532,10 @@ void js::RunPendingSourceCompressions(JSRuntime* runtime) {
HelperThreadState().startHandlingCompressionTasks(
GlobalHelperThreadState::ScheduleCompressionTask::API, nullptr, lock);
+ {
+ // Dispatch tasks.
+ AutoUnlockHelperThreadState unlock(lock);
+ }
// Wait until all tasks have started compression.
while (!HelperThreadState().compressionWorklist(lock).empty()) {
@@ -1735,3 +1737,28 @@ void GlobalHelperThreadState::runTaskLocked(HelperThreadTask* task,
js::oom::SetThreadType(js::THREAD_TYPE_NONE);
}
+
+void AutoHelperTaskQueue::queueTaskToDispatch(JS::DispatchReason reason) const {
+ // This is marked const because it doesn't release the mutex.
+
+ if (reason == JS::DispatchReason::FinishedTask) {
+ finishedTasksToDispatch++;
+ return;
+ }
+
+ newTasksToDispatch++;
+}
+
+void AutoHelperTaskQueue::dispatchQueuedTasks() {
+ // The hazard analysis can't tell that the callback doesn't GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ for (size_t i = 0; i < newTasksToDispatch; i++) {
+ HelperThreadState().dispatchTaskCallback(JS::DispatchReason::NewTask);
+ }
+ for (size_t i = 0; i < finishedTasksToDispatch; i++) {
+ HelperThreadState().dispatchTaskCallback(JS::DispatchReason::FinishedTask);
+ }
+ newTasksToDispatch = 0;
+ finishedTasksToDispatch = 0;
+}
diff --git a/js/src/vm/HelperThreads.h b/js/src/vm/HelperThreads.h
index 430511d104..3827168f98 100644
--- a/js/src/vm/HelperThreads.h
+++ b/js/src/vm/HelperThreads.h
@@ -14,6 +14,7 @@
#include "mozilla/Variant.h"
#include "js/AllocPolicy.h"
+#include "js/HelperThreadAPI.h"
#include "js/shadow/Zone.h"
#include "js/UniquePtr.h"
#include "js/Vector.h"
@@ -68,21 +69,44 @@ using UniqueTier2GeneratorTask = UniquePtr<Tier2GeneratorTask>;
*/
extern Mutex gHelperThreadLock MOZ_UNANNOTATED;
-class MOZ_RAII AutoLockHelperThreadState : public LockGuard<Mutex> {
- using Base = LockGuard<Mutex>;
-
+// Set of tasks to dispatch when the helper thread state lock is released.
+class AutoHelperTaskQueue {
public:
- explicit AutoLockHelperThreadState() : Base(gHelperThreadLock) {}
+ ~AutoHelperTaskQueue() { dispatchQueuedTasks(); }
+ bool hasQueuedTasks() const {
+ return newTasksToDispatch || finishedTasksToDispatch;
+ }
+ void queueTaskToDispatch(JS::DispatchReason reason) const;
+ void dispatchQueuedTasks();
+
+ private:
+ mutable uint32_t newTasksToDispatch = 0;
+ mutable uint32_t finishedTasksToDispatch = 0;
};
-class MOZ_RAII AutoUnlockHelperThreadState : public UnlockGuard<Mutex> {
- using Base = UnlockGuard<Mutex>;
-
+// A lock guard for data protected by the helper thread lock.
+//
+// This can also queue helper thread tasks to be triggered when the lock is
+// released.
+class MOZ_RAII AutoLockHelperThreadState
+ : public AutoHelperTaskQueue, // Must come before LockGuard.
+ public LockGuard<Mutex> {
public:
- explicit AutoUnlockHelperThreadState(AutoLockHelperThreadState& locked)
- : Base(locked) {}
+ AutoLockHelperThreadState() : LockGuard<Mutex>(gHelperThreadLock) {}
+ AutoLockHelperThreadState(const AutoLockHelperThreadState&) = delete;
+
+ private:
+ friend class UnlockGuard<AutoLockHelperThreadState>;
+ void unlock() {
+ LockGuard<Mutex>::unlock();
+ dispatchQueuedTasks();
+ }
+
+ friend class GlobalHelperThreadState;
};
+using AutoUnlockHelperThreadState = UnlockGuard<AutoLockHelperThreadState>;
+
// Create data structures used by helper threads.
bool CreateHelperThreadsState();
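
The net effect of the two classes above: tasks are only counted while the lock is held, and the dispatch callbacks run after it is released, so the callback never executes under gHelperThreadLock. Reduced to a self-contained sketch with hypothetical names:

#include <cstdint>
#include <cstdio>
#include <mutex>

std::mutex gLock;  // stand-in for gHelperThreadLock

// Stand-in for the dispatch callback; must not be called with gLock held.
void DispatchToPool() { std::puts("dispatch"); }

class TaskQueueingLockGuard {
  uint32_t queued_ = 0;

 public:
  TaskQueueingLockGuard() { gLock.lock(); }
  ~TaskQueueingLockGuard() {
    gLock.unlock();      // release the mutex first...
    for (; queued_ > 0; queued_--) {
      DispatchToPool();  // ...then invoke callbacks outside the lock
    }
  }
  void queueTask() { queued_++; }  // callers hold the lock here
};

int main() {
  TaskQueueingLockGuard lock;
  lock.queueTask();  // deferred until the guard unlocks at scope exit
}

The real AutoLockHelperThreadState additionally dispatches from the unlock() hook shown in the diff, so AutoUnlockHelperThreadState flushes the queue too.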
diff --git a/js/src/vm/InternalThreadPool.cpp b/js/src/vm/InternalThreadPool.cpp
index 483e995254..c6cdac1d44 100644
--- a/js/src/vm/InternalThreadPool.cpp
+++ b/js/src/vm/InternalThreadPool.cpp
@@ -187,7 +187,10 @@ void InternalThreadPool::DispatchTask(JS::DispatchReason reason) {
}
void InternalThreadPool::dispatchTask(JS::DispatchReason reason) {
- gHelperThreadLock.assertOwnedByCurrentThread();
+ // This could now use a separate mutex like TaskController, but continues to
+ // use the helper thread state lock for convenience.
+ AutoLockHelperThreadState lock;
+
queuedTasks++;
if (reason == JS::DispatchReason::NewTask) {
wakeup.notify_one();
@@ -279,7 +282,9 @@ void HelperThread::threadLoop(InternalThreadPool* pool) {
while (!pool->terminating) {
if (pool->queuedTasks != 0) {
pool->queuedTasks--;
- HelperThreadState().runOneTask(lock);
+
+ AutoUnlockHelperThreadState unlock(lock);
+ JS::RunHelperThreadTask();
continue;
}
diff --git a/js/src/vm/Interpreter.cpp b/js/src/vm/Interpreter.cpp
index f4cdc86f18..f005c12949 100644
--- a/js/src/vm/Interpreter.cpp
+++ b/js/src/vm/Interpreter.cpp
@@ -63,6 +63,7 @@
#include "vm/StringType.h"
#include "vm/ThrowMsgKind.h" // ThrowMsgKind
#include "vm/Time.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#ifdef ENABLE_RECORD_TUPLE
# include "vm/RecordType.h"
# include "vm/TupleType.h"
@@ -249,7 +250,7 @@ static inline bool GetNameOperation(JSContext* cx, HandleObject envChain,
Handle<PropertyName*> name, JSOp nextOp,
MutableHandleValue vp) {
/* Kludge to allow (typeof foo == "undefined") tests. */
- if (nextOp == JSOp::Typeof) {
+ if (IsTypeOfNameOp(nextOp)) {
return GetEnvironmentName<GetNameMode::TypeOf>(cx, envChain, name, vp);
}
return GetEnvironmentName<GetNameMode::Normal>(cx, envChain, name, vp);
@@ -2650,6 +2651,16 @@ bool MOZ_NEVER_INLINE JS_HAZ_JSNATIVE_CALLER js::Interpret(JSContext* cx,
}
END_CASE(Typeof)
+ CASE(TypeofEq) {
+ auto operand = TypeofEqOperand::fromRawValue(GET_UINT8(REGS.pc));
+ bool result = js::TypeOfValue(REGS.sp[-1]) == operand.type();
+ if (operand.compareOp() == JSOp::Ne) {
+ result = !result;
+ }
+ REGS.sp[-1].setBoolean(result);
+ }
+ END_CASE(TypeofEq)
+
CASE(Void) { REGS.sp[-1].setUndefined(); }
END_CASE(Void)
@@ -4140,13 +4151,7 @@ bool MOZ_NEVER_INLINE JS_HAZ_JSNATIVE_CALLER js::Interpret(JSContext* cx,
}
if (!DebugAPI::onResumeFrame(cx, REGS.fp())) {
- if (cx->isPropagatingForcedReturn()) {
- MOZ_ASSERT_IF(
- REGS.fp()
- ->callee()
- .isGenerator(), // as opposed to an async function
- gen->isClosed());
- }
+ MOZ_ASSERT_IF(cx->isPropagatingForcedReturn(), gen->isClosed());
goto error;
}
}
diff --git a/js/src/vm/JSAtomUtils.cpp b/js/src/vm/JSAtomUtils.cpp
index 2f8b066f0c..157e788964 100644
--- a/js/src/vm/JSAtomUtils.cpp
+++ b/js/src/vm/JSAtomUtils.cpp
@@ -666,6 +666,10 @@ JSAtom* js::AtomizeString(JSContext* cx, JSString* str) {
return &str->asAtom();
}
+ if (str->isAtomRef()) {
+ return str->atom();
+ }
+
if (JSAtom* atom = cx->caches().stringToAtomCache.lookup(str)) {
return atom;
}
@@ -691,6 +695,7 @@ JSAtom* js::AtomizeString(JSContext* cx, JSString* str) {
// not done in lookup() itself, because #including JSContext.h there
// causes some non-trivial #include ordering issues.
cx->markAtom(atom);
+ str->tryReplaceWithAtomRef(atom);
return atom;
}
}
@@ -723,7 +728,9 @@ JSAtom* js::AtomizeString(JSContext* cx, JSString* str) {
return nullptr;
}
- cx->caches().stringToAtomCache.maybePut(str, atom, key);
+ if (!str->tryReplaceWithAtomRef(atom)) {
+ cx->caches().stringToAtomCache.maybePut(str, atom, key);
+ }
return atom;
}
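
Taken together, these hunks give AtomizeString the following fast-path order: already an atom, then a cached atom reference, then the per-context cache, then the slow path, which now installs an atom ref on the source string and only falls back to the cache if that fails. A toy model with stand-in types (nothing below is the real SpiderMonkey API):

#include <string>
#include <unordered_map>

// Toy model of the new atomization ordering; every type here is a stand-in.
struct Atom {
  std::string chars;
};

struct Str {
  std::string chars;
  Atom* atomRef = nullptr;  // set once the string has been atomized
};

std::unordered_map<std::string, Atom*> gAtomTable;   // stand-in atom table
std::unordered_map<Str*, Atom*> gStringToAtomCache;  // stand-in cache

Atom* Atomize(Str* str) {
  if (str->atomRef) {  // new fast path: the string already knows its atom
    return str->atomRef;
  }
  auto cached = gStringToAtomCache.find(str);
  if (cached != gStringToAtomCache.end()) {  // per-context cache hit
    return cached->second;
  }
  Atom*& atom = gAtomTable[str->chars];  // slow path: intern the characters
  if (!atom) {
    atom = new Atom{std::string(str->chars)};
  }
  // Prefer installing the in-string reference; the real code falls back to
  // the cache only when tryReplaceWithAtomRef fails (elided here).
  str->atomRef = atom;
  return atom;
}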
diff --git a/js/src/vm/JSContext.cpp b/js/src/vm/JSContext.cpp
index 3cc2c4807c..6ffe04b190 100644
--- a/js/src/vm/JSContext.cpp
+++ b/js/src/vm/JSContext.cpp
@@ -1038,6 +1038,16 @@ JSContext::JSContext(JSRuntime* runtime, const JS::ContextOptions& options)
JS::RootingContext::get(this));
}
+#ifdef ENABLE_WASM_JSPI
+bool js::IsSuspendableStackActive(JSContext* cx) {
+ return cx->wasm().suspendableStackLimit != JS::NativeStackLimitMin;
+}
+
+JS::NativeStackLimit js::GetSuspendableStackLimit(JSContext* cx) {
+ return cx->wasm().suspendableStackLimit;
+}
+#endif
+
JSContext::~JSContext() {
#ifdef DEBUG
// Clear the initialized_ first, so that ProtectedData checks will allow us to
@@ -1256,6 +1266,9 @@ void JSContext::trace(JSTracer* trc) {
if (isolate) {
irregexp::TraceIsolate(trc, isolate.ref());
}
+#ifdef ENABLE_WASM_JSPI
+ wasm().promiseIntegration.trace(trc);
+#endif
}
JS::NativeStackLimit JSContext::stackLimitForJitCode(JS::StackKind kind) {
diff --git a/js/src/vm/JSContext.h b/js/src/vm/JSContext.h
index ba665d6c1a..df09fa40c8 100644
--- a/js/src/vm/JSContext.h
+++ b/js/src/vm/JSContext.h
@@ -416,6 +416,16 @@ struct JS_PUBLIC_API JSContext : public JS::RootingContext,
/* If non-null, report JavaScript entry points to this monitor. */
js::ContextData<JS::dbg::AutoEntryMonitor*> entryMonitor;
+ // In brittle mode, any failure will produce a diagnostic assertion rather
+ // than propagating an error or throwing an exception. This is used for
+ // intermittent crash diagnostics: if an operation is failing for unknown
+ // reasons, turn on brittle mode and annotate the operations within
+ // SpiderMonkey that the failing operation uses with:
+ //
+ // MOZ_DIAGNOSTIC_ASSERT(!cx->brittleMode, "specific failure");
+ //
+ bool brittleMode = false;
+
/*
* Stack of debuggers that currently disallow debuggee execution.
*
diff --git a/js/src/vm/JSONParser.cpp b/js/src/vm/JSONParser.cpp
index 7a440e3090..54e15176a6 100644
--- a/js/src/vm/JSONParser.cpp
+++ b/js/src/vm/JSONParser.cpp
@@ -1428,35 +1428,34 @@ class MOZ_STACK_CLASS DelegateHandler {
}
inline bool setBooleanValue(bool value, mozilla::Span<const CharT>&& source) {
- return true;
- }
- inline bool setNullValue(mozilla::Span<const CharT>&& source) { return true; }
-
- inline DummyValue numberValue() const { return DummyValue(); }
-
- inline DummyValue stringValue() const { return DummyValue(); }
-
- inline DummyValue booleanValue(bool value) {
if (hadHandlerError_) {
- return DummyValue();
+ return false;
}
if (!handler_->booleanValue(value)) {
hadHandlerError_ = true;
}
- return DummyValue();
+ return !hadHandlerError_;
}
- inline DummyValue nullValue() {
+ inline bool setNullValue(mozilla::Span<const CharT>&& source) {
if (hadHandlerError_) {
- return DummyValue();
+ return false;
}
if (!handler_->nullValue()) {
hadHandlerError_ = true;
}
- return DummyValue();
+ return !hadHandlerError_;
}
+ inline DummyValue numberValue() const { return DummyValue(); }
+
+ inline DummyValue stringValue() const { return DummyValue(); }
+
+ inline DummyValue booleanValue(bool value) { return DummyValue(); }
+
+ inline DummyValue nullValue() { return DummyValue(); }
+
inline bool objectOpen(Vector<StackEntry, 10>& stack,
PropertyVector** properties) {
if (hadHandlerError_) {
diff --git a/js/src/vm/JSObject.cpp b/js/src/vm/JSObject.cpp
index 8bc8bc0d52..663b9c2260 100644
--- a/js/src/vm/JSObject.cpp
+++ b/js/src/vm/JSObject.cpp
@@ -1215,6 +1215,10 @@ void JSObject::swap(JSContext* cx, HandleObject a, HandleObject b,
MOZ_RELEASE_ASSERT(js::ObjectMayBeSwapped(a));
MOZ_RELEASE_ASSERT(js::ObjectMayBeSwapped(b));
+ // Don't allow a GC which may observe intermediate state or run before we
+ // execute all necessary barriers.
+ gc::AutoSuppressGC nogc(cx);
+
if (!Watchtower::watchObjectSwap(cx, a, b)) {
oomUnsafe.crash("watchObjectSwap");
}
@@ -1305,10 +1309,6 @@ void JSObject::swap(JSContext* cx, HandleObject a, HandleObject b,
a->as<ProxyObject>().setInlineValueArray();
}
} else {
- // Avoid GC in here to avoid confusing the tracing code with our
- // intermediate state.
- gc::AutoSuppressGC suppress(cx);
-
// When the objects have different sizes, they will have different numbers
// of fixed slots before and after the swap, so the slots for native objects
// will need to be rearranged. Remember the original values from the
@@ -2204,7 +2204,6 @@ JS_PUBLIC_API bool js::ShouldIgnorePropertyDefinition(JSContext* cx,
return true;
}
-#ifdef NIGHTLY_BUILD
if (key == JSProto_Set && !JS::Prefs::experimental_new_set_methods() &&
(id == NameToId(cx->names().union_) ||
id == NameToId(cx->names().difference) ||
@@ -2216,6 +2215,7 @@ JS_PUBLIC_API bool js::ShouldIgnorePropertyDefinition(JSContext* cx,
return true;
}
+#ifdef NIGHTLY_BUILD
if (key == JSProto_ArrayBuffer && !JS::Prefs::arraybuffer_transfer() &&
(id == NameToId(cx->names().transfer) ||
id == NameToId(cx->names().transferToFixedLength) ||
@@ -2267,6 +2267,18 @@ JS_PUBLIC_API bool js::ShouldIgnorePropertyDefinition(JSContext* cx,
}
#endif
+#ifdef NIGHTLY_BUILD
+ if (key == JSProto_Math && !JS::Prefs::experimental_float16array() &&
+ (id == NameToId(cx->names().f16round))) {
+ return true;
+ }
+ if (key == JSProto_DataView && !JS::Prefs::experimental_float16array() &&
+ (id == NameToId(cx->names().getFloat16) ||
+ id == NameToId(cx->names().setFloat16))) {
+ return true;
+ }
+#endif
+
return false;
}
diff --git a/js/src/vm/JSScript.h b/js/src/vm/JSScript.h
index 4e44d22304..c46ff65d2d 100644
--- a/js/src/vm/JSScript.h
+++ b/js/src/vm/JSScript.h
@@ -2040,6 +2040,23 @@ class JSScript : public js::BaseScript {
return getString(GET_GCTHING_INDEX(pc));
}
+ bool atomizeString(JSContext* cx, jsbytecode* pc) {
+ MOZ_ASSERT(containsPC<js::GCThingIndex>(pc));
+ MOZ_ASSERT(js::JOF_OPTYPE((JSOp)*pc) == JOF_STRING);
+ js::GCThingIndex index = GET_GCTHING_INDEX(pc);
+ JSString* str = getString(index);
+ if (str->isAtom()) {
+ return true;
+ }
+ JSAtom* atom = js::AtomizeString(cx, str);
+ if (!atom) {
+ return false;
+ }
+ js::gc::CellPtrPreWriteBarrier(data_->gcthings()[index]);
+ data_->gcthings()[index] = JS::GCCellPtr(atom);
+ return true;
+ }
+
JSAtom* getAtom(js::GCThingIndex index) const {
return &gcthings()[index].as<JSString>().asAtom();
}
diff --git a/js/src/vm/JitActivation.cpp b/js/src/vm/JitActivation.cpp
index 83ec19df8a..e3ff3dd990 100644
--- a/js/src/vm/JitActivation.cpp
+++ b/js/src/vm/JitActivation.cpp
@@ -13,6 +13,7 @@
#include <utility> // std::move
#include "debugger/DebugAPI.h" // js::DebugAPI
+#include "jit/Invalidation.h" // js::jit::Invalidate
#include "jit/JSJitFrameIter.h" // js::jit::InlineFrameIterator
#include "jit/RematerializedFrame.h" // js::jit::RematerializedFrame
#include "js/AllocPolicy.h" // js::ReportOutOfMemory
@@ -58,7 +59,9 @@ js::jit::JitActivation::~JitActivation() {
// Traps get handled immediately.
MOZ_ASSERT(!isWasmTrapping());
- clearRematerializedFrames();
+ // Rematerialized frames must have been removed by either the bailout code or
+ // the exception handler.
+ MOZ_ASSERT_IF(rematerializedFrames_, rematerializedFrames_->empty());
}
void js::jit::JitActivation::setBailoutData(
@@ -82,20 +85,9 @@ void js::jit::JitActivation::removeRematerializedFrame(uint8_t* top) {
}
}
-void js::jit::JitActivation::clearRematerializedFrames() {
- if (!rematerializedFrames_) {
- return;
- }
-
- for (RematerializedFrameTable::Enum e(*rematerializedFrames_); !e.empty();
- e.popFront()) {
- e.removeFront();
- }
-}
-
js::jit::RematerializedFrame* js::jit::JitActivation::getRematerializedFrame(
JSContext* cx, const JSJitFrameIter& iter, size_t inlineDepth,
- MaybeReadFallback::FallbackConsequence consequence) {
+ IsLeavingFrame leaving) {
MOZ_ASSERT(iter.activation() == this);
MOZ_ASSERT(iter.isIonScripted());
@@ -117,6 +109,14 @@ js::jit::RematerializedFrame* js::jit::JitActivation::getRematerializedFrame(
// preserve identity. Therefore, we always rematerialize an uninlined
// frame and all its inlined frames at once.
InlineFrameIterator inlineIter(cx, &iter);
+
+ // We can run recover instructions without invalidating if we're always
+ // leaving the frame.
+ MaybeReadFallback::FallbackConsequence consequence =
+ MaybeReadFallback::Fallback_Invalidate;
+ if (leaving == IsLeavingFrame::Yes) {
+ consequence = MaybeReadFallback::Fallback_DoNothing;
+ }
MaybeReadFallback recover(cx, this, &iter, consequence);
// Frames are often rematerialized with the cx inside a Debugger's
@@ -124,6 +124,14 @@ js::jit::RematerializedFrame* js::jit::JitActivation::getRematerializedFrame(
// be in the script's realm.
AutoRealmUnchecked ar(cx, iter.script()->realm());
+ // The Ion frame must be invalidated to ensure the rematerialized frame will
+ // be removed by the bailout code or the exception handler. If we're always
+ // leaving the frame, the caller is responsible for cleaning up the
+ // rematerialized frame.
+ if (leaving == IsLeavingFrame::No && !iter.checkInvalidation()) {
+ jit::Invalidate(cx, iter.script());
+ }
+
if (!RematerializedFrame::RematerializeInlineFrames(cx, top, inlineIter,
recover, frames)) {
return nullptr;
diff --git a/js/src/vm/JitActivation.h b/js/src/vm/JitActivation.h
index be2d63066c..54ca5924d9 100644
--- a/js/src/vm/JitActivation.h
+++ b/js/src/vm/JitActivation.h
@@ -40,6 +40,8 @@ namespace jit {
class BailoutFrameInfo;
+enum class IsLeavingFrame { No, Yes };
+
// A JitActivation is used for frames running in Baseline or Ion.
class JitActivation : public Activation {
// If Baseline, Ion or Wasm code is on the stack, and has called into C++,
@@ -94,8 +96,6 @@ class JitActivation : public Activation {
// purposes. Wasm code can't trap reentrantly.
mozilla::Maybe<wasm::TrapData> wasmTrapData_;
- void clearRematerializedFrames();
-
#ifdef CHECK_OSIPOINT_REGISTERS
protected:
// Used to verify that live registers don't change between a VM call and
@@ -156,8 +156,7 @@ class JitActivation : public Activation {
// The inlineDepth must be within bounds of the frame pointed to by iter.
RematerializedFrame* getRematerializedFrame(
JSContext* cx, const JSJitFrameIter& iter, size_t inlineDepth = 0,
- MaybeReadFallback::FallbackConsequence consequence =
- MaybeReadFallback::Fallback_Invalidate);
+ IsLeavingFrame leaving = IsLeavingFrame::No);
// Look up a rematerialized frame by the fp. If inlineDepth is out of
// bounds of what has been rematerialized, nullptr is returned.
diff --git a/js/src/vm/ModuleBuilder.h b/js/src/vm/ModuleBuilder.h
index 31f8ec2826..738006c069 100644
--- a/js/src/vm/ModuleBuilder.h
+++ b/js/src/vm/ModuleBuilder.h
@@ -90,7 +90,7 @@ class MOZ_STACK_CLASS ModuleBuilder {
MaybeModuleRequestIndex appendModuleRequest(
frontend::TaggedParserAtomIndex specifier,
- frontend::ListNode* assertionList);
+ frontend::ListNode* attributeList);
bool appendExportEntry(frontend::TaggedParserAtomIndex exportName,
frontend::TaggedParserAtomIndex localName,
@@ -101,10 +101,10 @@ class MOZ_STACK_CLASS ModuleBuilder {
void markUsedByStencil(frontend::TaggedParserAtomIndex name);
- [[nodiscard]] bool processAssertions(frontend::StencilModuleRequest& request,
- frontend::ListNode* assertionList);
+ [[nodiscard]] bool processAttributes(frontend::StencilModuleRequest& request,
+ frontend::ListNode* attributeList);
- [[nodiscard]] bool isAssertionSupported(frontend::TaggedParserAtomIndex key);
+ [[nodiscard]] bool isAttributeSupported(frontend::TaggedParserAtomIndex key);
};
template <typename T>
diff --git a/js/src/vm/Modules.cpp b/js/src/vm/Modules.cpp
index 917083a238..867201baa9 100644
--- a/js/src/vm/Modules.cpp
+++ b/js/src/vm/Modules.cpp
@@ -310,7 +310,9 @@ JS_PUBLIC_API JSObject* JS::CreateModuleRequest(
return nullptr;
}
- return ModuleRequestObject::create(cx, specifierAtom, nullptr);
+ Rooted<UniquePtr<ImportAttributeVector>> attributes(cx);
+
+ return ModuleRequestObject::create(cx, specifierAtom, &attributes);
}
JS_PUBLIC_API JSString* JS::GetModuleRequestSpecifier(
@@ -1256,6 +1258,36 @@ static bool ModuleLink(JSContext* cx, Handle<ModuleObject*> module) {
return true;
}
+// https://tc39.es/proposal-import-attributes/#sec-AllImportAttributesSupported
+static bool AllImportAttributesSupported(
+ JSContext* cx, mozilla::Span<const ImportAttribute> attributes) {
+ // Step 1. Let supported be HostGetSupportedImportAttributes().
+ //
+ // Note: This should be driven by a host hook
+ // (HostGetSupportedImportAttributes), however the infrastructure of said host
+ // hook is deeply unclear, and so right now embedders will not have the
+ // ability to alter or extend the set of supported attributes. See
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1840723.
+
+ // Step 2. For each ImportAttribute Record attribute of attributes, do
+ for (const ImportAttribute& attribute : attributes) {
+ // Step 2.a. If supported does not contain attribute.[[Key]], return false.
+ if (attribute.key() != cx->names().type) {
+ UniqueChars printableKey = AtomToPrintableString(cx, attribute.key());
+ if (!printableKey) {
+ return false;
+ }
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_IMPORT_ATTRIBUTES_UNSUPPORTED_ATTRIBUTE,
+ printableKey.get());
+ return false;
+ }
+ }
+
+ // Step 3. Return true.
+ return true;
+}
+
// https://tc39.es/ecma262/#sec-InnerModuleLinking
// ES2023 16.2.1.5.1.1 InnerModuleLinking
static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
@@ -1312,6 +1344,13 @@ static bool InnerModuleLinking(JSContext* cx, Handle<ModuleObject*> module,
for (const RequestedModule& request : module->requestedModules()) {
moduleRequest = request.moduleRequest();
+ // According to the spec, this should be in InnerModuleLoading, but
+ // currently, our module code is not aligned with the spec text.
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1894729
+ if (!AllImportAttributesSupported(cx, moduleRequest->attributes())) {
+ return false;
+ }
+
// Step 9.a. Let requiredModule be ? HostResolveImportedModule(module,
// required).
requiredModule = HostResolveImportedModule(cx, module, moduleRequest,
diff --git a/js/src/vm/Monitor.h b/js/src/vm/Monitor.h
index 6c0fbff0d9..5a2c168729 100644
--- a/js/src/vm/Monitor.h
+++ b/js/src/vm/Monitor.h
@@ -20,7 +20,6 @@ namespace js {
class Monitor {
protected:
friend class AutoLockMonitor;
- friend class AutoUnlockMonitor;
Mutex lock_ MOZ_UNANNOTATED;
ConditionVariable condVar_;
@@ -53,20 +52,6 @@ class AutoLockMonitor : public LockGuard<Mutex> {
void notifyAll() { notifyAll(monitor.condVar_); }
};
-class AutoUnlockMonitor {
- private:
- Monitor& monitor;
-
- public:
- explicit AutoUnlockMonitor(Monitor& monitor) : monitor(monitor) {
- monitor.lock_.unlock();
- }
-
- ~AutoUnlockMonitor() { monitor.lock_.lock(); }
-
- bool isFor(Monitor& other) const { return &monitor.lock_ == &other.lock_; }
-};
-
} // namespace js
#endif /* vm_Monitor_h */
diff --git a/js/src/vm/Opcodes.h b/js/src/vm/Opcodes.h
index 438f361bcf..6db658701d 100644
--- a/js/src/vm/Opcodes.h
+++ b/js/src/vm/Opcodes.h
@@ -412,6 +412,19 @@
MACRO(Typeof, typeof_, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
MACRO(TypeofExpr, typeof_expr, NULL, 1, 1, 1, JOF_BYTE|JOF_IC) \
/*
+ * A compound opcode for `typeof val === "type"` or `typeof val !== "type"`,
* where `val` is a single identifier.
*
* Infallible. The result is always a boolean that depends on the type of
* `val`, the `"type"` string, and the comparison operator.
+ *
+ * Category: Expressions
+ * Type: Other expressions
+ * Operands: TypeofEqOperand operand
+ * Stack: val => (typeof val CMP "type")
+ */ \
+ MACRO(TypeofEq, typeof_eq, NULL, 2, 1, 1, JOF_UINT8|JOF_IC) \
+ /*
* [The unary `+` operator][1].
*
* `+val` doesn't do any actual math. It just calls [ToNumber][2](val).
@@ -909,7 +922,7 @@
* Operands: uint32_t nameIndex
* Stack: obj, val => obj
*/ \
- MACRO(InitProp, init_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitProp, init_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT|JOF_IC) \
/*
* Like `JSOp::InitProp`, but define a non-enumerable property.
*
@@ -925,7 +938,7 @@
* Operands: uint32_t nameIndex
* Stack: obj, val => obj
*/ \
- MACRO(InitHiddenProp, init_hidden_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitHiddenProp, init_hidden_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT|JOF_IC) \
/*
* Like `JSOp::InitProp`, but define a non-enumerable, non-writable,
* non-configurable property.
@@ -942,7 +955,7 @@
* Operands: uint32_t nameIndex
* Stack: obj, val => obj
*/ \
- MACRO(InitLockedProp, init_locked_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitLockedProp, init_locked_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT|JOF_IC) \
/*
* Define a data property on `obj` with property key `id` and value `val`.
*
@@ -964,9 +977,9 @@
* Operands:
* Stack: obj, id, val => obj
*/ \
- MACRO(InitElem, init_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
- MACRO(InitHiddenElem, init_hidden_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
- MACRO(InitLockedElem, init_locked_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitElem, init_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitHiddenElem, init_hidden_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitLockedElem, init_locked_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT|JOF_IC) \
/*
* Define an accessor property on `obj` with the given `getter`.
* `nameIndex` gives the property name.
@@ -981,8 +994,8 @@
* Operands: uint32_t nameIndex
* Stack: obj, getter => obj
*/ \
- MACRO(InitPropGetter, init_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
- MACRO(InitHiddenPropGetter, init_hidden_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ MACRO(InitPropGetter, init_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT) \
+ MACRO(InitHiddenPropGetter, init_hidden_prop_getter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT) \
/*
* Define an accessor property on `obj` with property key `id` and the given `getter`.
*
@@ -998,8 +1011,8 @@
* Operands:
* Stack: obj, id, getter => obj
*/ \
- MACRO(InitElemGetter, init_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
- MACRO(InitHiddenElemGetter, init_hidden_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ MACRO(InitElemGetter, init_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT) \
+ MACRO(InitHiddenElemGetter, init_hidden_elem_getter, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT) \
/*
* Define an accessor property on `obj` with the given `setter`.
*
@@ -1015,8 +1028,8 @@
* Operands: uint32_t nameIndex
* Stack: obj, setter => obj
*/ \
- MACRO(InitPropSetter, init_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
- MACRO(InitHiddenPropSetter, init_hidden_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPINIT) \
+ MACRO(InitPropSetter, init_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT) \
+ MACRO(InitHiddenPropSetter, init_hidden_prop_setter, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPINIT) \
/*
* Define an accessor property on `obj` with property key `id` and the
* given `setter`.
@@ -1032,8 +1045,8 @@
* Operands:
* Stack: obj, id, setter => obj
*/ \
- MACRO(InitElemSetter, init_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
- MACRO(InitHiddenElemSetter, init_hidden_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPINIT) \
+ MACRO(InitElemSetter, init_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT) \
+ MACRO(InitHiddenElemSetter, init_hidden_elem_setter, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPINIT) \
/*
* Get the value of the property `obj.name`. This can call getters and
* proxy traps.
@@ -1048,7 +1061,7 @@
* Operands: uint32_t nameIndex
* Stack: obj => obj[name]
*/ \
- MACRO(GetProp, get_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_IC) \
+ MACRO(GetProp, get_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_IC) \
/*
* Get the value of the property `obj[key]`.
*
@@ -1062,7 +1075,7 @@
* Operands:
* Stack: obj, key => obj[key]
*/ \
- MACRO(GetElem, get_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_IC) \
+ MACRO(GetElem, get_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_IC) \
/*
* Non-strict assignment to a property, `obj.name = val`.
*
@@ -1079,7 +1092,7 @@
* Operands: uint32_t nameIndex
* Stack: obj, val => val
*/ \
- MACRO(SetProp, set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
+ MACRO(SetProp, set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
/*
* Like `JSOp::SetProp`, but for strict mode code. Throw a TypeError if
* `obj[key]` exists but is non-writable, if it's an accessor property with
@@ -1090,7 +1103,7 @@
* Operands: uint32_t nameIndex
* Stack: obj, val => val
*/ \
- MACRO(StrictSetProp, strict_set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
+ MACRO(StrictSetProp, strict_set_prop, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
/*
* Non-strict assignment to a property, `obj[key] = val`.
*
@@ -1103,7 +1116,7 @@
* Operands:
* Stack: obj, key, val => val
*/ \
- MACRO(SetElem, set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
+ MACRO(SetElem, set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC) \
/*
* Like `JSOp::SetElem`, but for strict mode code. Throw a TypeError if
* `obj[key]` exists but is non-writable, if it's an accessor property with
@@ -1114,7 +1127,7 @@
* Operands:
* Stack: obj, key, val => val
*/ \
- MACRO(StrictSetElem, strict_set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
+ MACRO(StrictSetElem, strict_set_elem, NULL, 1, 3, 1, JOF_BYTE|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC) \
/*
* Delete a property from `obj`. Push true on success, false if the
* property existed but could not be deleted. This implements `delete
@@ -1131,7 +1144,7 @@
* Operands: uint32_t nameIndex
* Stack: obj => succeeded
*/ \
- MACRO(DelProp, del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_CHECKSLOPPY) \
+ MACRO(DelProp, del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_CHECKSLOPPY) \
/*
* Like `JSOp::DelProp`, but for strict mode code. Push `true` on success,
* else throw a TypeError.
@@ -1141,7 +1154,7 @@
* Operands: uint32_t nameIndex
* Stack: obj => succeeded
*/ \
- MACRO(StrictDelProp, strict_del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_PROP|JOF_CHECKSTRICT) \
+ MACRO(StrictDelProp, strict_del_prop, NULL, 5, 1, 1, JOF_ATOM|JOF_CHECKSTRICT) \
/*
* Delete the property `obj[key]` and push `true` on success, `false`
* if the property existed but could not be deleted.
@@ -1157,7 +1170,7 @@
* Operands:
* Stack: obj, key => succeeded
*/ \
- MACRO(DelElem, del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_CHECKSLOPPY) \
+ MACRO(DelElem, del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_CHECKSLOPPY) \
/*
 * Like `JSOp::DelElem`, but for strict mode code. Push `true` on success,
* else throw a TypeError.
@@ -1167,7 +1180,7 @@
* Operands:
* Stack: obj, key => succeeded
*/ \
- MACRO(StrictDelElem, strict_del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_ELEM|JOF_CHECKSTRICT) \
+ MACRO(StrictDelElem, strict_del_elem, NULL, 1, 2, 1, JOF_BYTE|JOF_CHECKSTRICT) \
/*
* Push true if `obj` has an own property `id`.
*
@@ -1245,7 +1258,7 @@
* Operands: uint32_t nameIndex
* Stack: receiver, obj => super.name
*/ \
- MACRO(GetPropSuper, get_prop_super, NULL, 5, 2, 1, JOF_ATOM|JOF_PROP|JOF_IC) \
+ MACRO(GetPropSuper, get_prop_super, NULL, 5, 2, 1, JOF_ATOM|JOF_IC) \
/*
* Get the value of `receiver[key]`, starting the property search at `obj`.
* In spec terms, `obj.[[Get]](key, receiver)`.
@@ -1263,7 +1276,7 @@
* Operands:
* Stack: receiver, key, obj => super[key]
*/ \
- MACRO(GetElemSuper, get_elem_super, NULL, 1, 3, 1, JOF_BYTE|JOF_ELEM|JOF_IC) \
+ MACRO(GetElemSuper, get_elem_super, NULL, 1, 3, 1, JOF_BYTE|JOF_IC) \
/*
* Assign `val` to `receiver.name`, starting the search for an existing
* property at `obj`. In spec terms, `obj.[[Set]](name, val, receiver)`.
@@ -1280,7 +1293,7 @@
* Operands: uint32_t nameIndex
* Stack: receiver, obj, val => val
*/ \
- MACRO(SetPropSuper, set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSLOPPY) \
+ MACRO(SetPropSuper, set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSLOPPY) \
/*
* Like `JSOp::SetPropSuper`, but for strict mode code.
*
@@ -1289,7 +1302,7 @@
* Operands: uint32_t nameIndex
* Stack: receiver, obj, val => val
*/ \
- MACRO(StrictSetPropSuper, strict_set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROP|JOF_PROPSET|JOF_CHECKSTRICT) \
+ MACRO(StrictSetPropSuper, strict_set_prop_super, NULL, 5, 3, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSTRICT) \
/*
 * Assign `val` to `receiver[key]`, starting the search for an existing
* property at `obj`. In spec terms, `obj.[[Set]](key, val, receiver)`.
@@ -1306,7 +1319,7 @@
* Operands:
* Stack: receiver, key, obj, val => val
*/ \
- MACRO(SetElemSuper, set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSLOPPY) \
+ MACRO(SetElemSuper, set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_PROPSET|JOF_CHECKSLOPPY) \
/*
* Like `JSOp::SetElemSuper`, but for strict mode code.
*
@@ -1315,7 +1328,7 @@
* Operands:
* Stack: receiver, key, obj, val => val
*/ \
- MACRO(StrictSetElemSuper, strict_set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_ELEM|JOF_PROPSET|JOF_CHECKSTRICT) \
+ MACRO(StrictSetElemSuper, strict_set_elem_super, NULL, 1, 4, 1, JOF_BYTE|JOF_PROPSET|JOF_CHECKSTRICT) \
/*
* Set up a for-in loop by pushing a `PropertyIteratorObject` over the
* enumerable properties of `val`.
@@ -1521,7 +1534,7 @@
* Operands: uint32_t index
* Stack: array, val => array
*/ \
- MACRO(InitElemArray, init_elem_array, NULL, 5, 2, 1, JOF_UINT32|JOF_ELEM|JOF_PROPINIT) \
+ MACRO(InitElemArray, init_elem_array, NULL, 5, 2, 1, JOF_UINT32|JOF_PROPINIT) \
/*
* Initialize an array element `array[index++]` with value `val`.
*
@@ -1553,7 +1566,7 @@
* Operands:
* Stack: array, index, val => array, (index + 1)
*/ \
- MACRO(InitElemInc, init_elem_inc, NULL, 1, 3, 2, JOF_BYTE|JOF_ELEM|JOF_PROPINIT|JOF_IC) \
+ MACRO(InitElemInc, init_elem_inc, NULL, 1, 3, 2, JOF_BYTE|JOF_PROPINIT|JOF_IC) \
/*
* Push `MagicValue(JS_ELEMENTS_HOLE)`, representing an *Elision* in an
* array literal (like the missing property 0 in the array `[, 1]`).
@@ -2650,7 +2663,7 @@
* Operands: uint32_t nameIndex
* Stack:
*/ \
- MACRO(ThrowSetConst, throw_set_const, NULL, 5, 0, 0, JOF_ATOM|JOF_NAME) \
+ MACRO(ThrowSetConst, throw_set_const, NULL, 5, 0, 0, JOF_ATOM) \
/*
* No-op instruction that marks the top of the bytecode for a
* *TryStatement*.
@@ -2751,7 +2764,7 @@
* Operands: uint24_t localno
* Stack: v => v
*/ \
- MACRO(InitLexical, init_lexical, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ MACRO(InitLexical, init_lexical, NULL, 4, 1, 1, JOF_LOCAL) \
/*
* Initialize a global lexical binding.
*
@@ -2766,7 +2779,7 @@
* Operands: uint32_t nameIndex
* Stack: val => val
*/ \
- MACRO(InitGLexical, init_g_lexical, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME|JOF_PROPINIT|JOF_GNAME|JOF_IC) \
+ MACRO(InitGLexical, init_g_lexical, NULL, 5, 1, 1, JOF_ATOM|JOF_PROPINIT|JOF_GNAME|JOF_IC) \
/*
* Initialize an aliased lexical binding; or mark it as uninitialized.
*
@@ -2784,7 +2797,7 @@
* Operands: uint8_t hops, uint24_t slot
* Stack: v => v
*/ \
- MACRO(InitAliasedLexical, init_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME|JOF_PROPINIT) \
+ MACRO(InitAliasedLexical, init_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_PROPINIT) \
/*
* Throw a ReferenceError if the value on top of the stack is uninitialized.
*
@@ -2801,7 +2814,7 @@
* Operands: uint24_t localno
* Stack: v => v
*/ \
- MACRO(CheckLexical, check_lexical, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ MACRO(CheckLexical, check_lexical, NULL, 4, 1, 1, JOF_LOCAL) \
/*
* Like `JSOp::CheckLexical` but for aliased bindings.
*
@@ -2816,7 +2829,7 @@
* Operands: uint8_t hops, uint24_t slot
* Stack: v => v
*/ \
- MACRO(CheckAliasedLexical, check_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME) \
+ MACRO(CheckAliasedLexical, check_aliased_lexical, NULL, 5, 1, 1, JOF_ENVCOORD) \
/*
* Throw a ReferenceError if the value on top of the stack is
* `MagicValue(JS_UNINITIALIZED_LEXICAL)`. Used in derived class
@@ -2843,7 +2856,7 @@
* Operands: uint32_t nameIndex
* Stack: => global
*/ \
- MACRO(BindGName, bind_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_GNAME|JOF_IC) \
+ MACRO(BindGName, bind_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_GNAME|JOF_IC) \
/*
* Look up a name on the environment chain and push the environment which
* contains a binding for that name. If no such binding exists, push the
@@ -2854,13 +2867,14 @@
* Operands: uint32_t nameIndex
* Stack: => env
*/ \
- MACRO(BindName, bind_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC|JOF_USES_ENV) \
+ MACRO(BindName, bind_name, NULL, 5, 0, 1, JOF_ATOM|JOF_IC|JOF_USES_ENV) \
/*
* Find a binding on the environment chain and push its value.
*
* If the binding is an uninitialized lexical, throw a ReferenceError. If
* no such binding exists, throw a ReferenceError unless the next
- * instruction is `JSOp::Typeof`, in which case push `undefined`.
+ * instruction is `JSOp::Typeof` or `JSOp::TypeofEq` (see IsTypeOfNameOp),
+ * in which case push `undefined`.
*
* Implements: [ResolveBinding][1] followed by [GetValue][2]
* (adjusted hackily for `typeof`).
@@ -2876,7 +2890,7 @@
* Operands: uint32_t nameIndex
* Stack: => val
*/ \
- MACRO(GetName, get_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC|JOF_USES_ENV) \
+ MACRO(GetName, get_name, NULL, 5, 0, 1, JOF_ATOM|JOF_IC|JOF_USES_ENV) \
/*
* Find a global binding and push its value.
*
@@ -2900,7 +2914,7 @@
* Operands: uint32_t nameIndex
* Stack: => val
*/ \
- MACRO(GetGName, get_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_GNAME|JOF_IC) \
+ MACRO(GetGName, get_g_name, NULL, 5, 0, 1, JOF_ATOM|JOF_GNAME|JOF_IC) \
/*
* Push the value of an argument that is stored in the stack frame
* or in an `ArgumentsObject`.
@@ -2910,7 +2924,7 @@
* Operands: uint16_t argno
* Stack: => arguments[argno]
*/ \
- MACRO(GetArg, get_arg, NULL, 3, 0, 1, JOF_QARG|JOF_NAME) \
+ MACRO(GetArg, get_arg, NULL, 3, 0, 1, JOF_QARG) \
/*
* Push the value of an argument that is stored in the stack frame. Like
* `JSOp::GetArg`, but ignores the frame's `ArgumentsObject` and doesn't
@@ -2921,7 +2935,7 @@
* Operands: uint16_t argno
* Stack: => arguments[argno]
*/ \
- MACRO(GetFrameArg, get_frame_arg, NULL, 3, 0, 1, JOF_QARG|JOF_NAME) \
+ MACRO(GetFrameArg, get_frame_arg, NULL, 3, 0, 1, JOF_QARG) \
/*
* Push the value of an optimized local variable.
*
@@ -2933,7 +2947,7 @@
* Operands: uint24_t localno
* Stack: => val
*/ \
- MACRO(GetLocal, get_local, NULL, 4, 0, 1, JOF_LOCAL|JOF_NAME) \
+ MACRO(GetLocal, get_local, NULL, 4, 0, 1, JOF_LOCAL) \
/*
* Push the number of actual arguments as Int32Value.
*
@@ -2983,7 +2997,7 @@
* Operands: uint8_t hops, uint24_t slot
* Stack: => aliasedVar
*/ \
- MACRO(GetAliasedVar, get_aliased_var, NULL, 5, 0, 1, JOF_ENVCOORD|JOF_NAME|JOF_USES_ENV) \
+ MACRO(GetAliasedVar, get_aliased_var, NULL, 5, 0, 1, JOF_ENVCOORD|JOF_USES_ENV) \
/*
* Push the value of an aliased binding, which may have to bypass a DebugEnvironmentProxy
* on the environment chain.
@@ -2993,7 +3007,7 @@
* Operands: uint8_t hops, uint24_t slot
* Stack: => aliasedVar
*/ \
- MACRO(GetAliasedDebugVar, get_aliased_debug_var, NULL, 5, 0, 1, JOF_DEBUGCOORD|JOF_NAME) \
+ MACRO(GetAliasedDebugVar, get_aliased_debug_var, NULL, 5, 0, 1, JOF_DEBUGCOORD) \
/*
* Get the value of a module import by name and pushes it onto the stack.
*
@@ -3002,7 +3016,7 @@
* Operands: uint32_t nameIndex
* Stack: => val
*/ \
- MACRO(GetImport, get_import, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME) \
+ MACRO(GetImport, get_import, NULL, 5, 0, 1, JOF_ATOM) \
/*
* Get the value of a binding from the environment `env`. If the name is
* not bound in `env`, throw a ReferenceError.
@@ -3027,7 +3041,7 @@
* Operands: uint32_t nameIndex
* Stack: env => v
*/ \
- MACRO(GetBoundName, get_bound_name, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME|JOF_IC) \
+ MACRO(GetBoundName, get_bound_name, NULL, 5, 1, 1, JOF_ATOM|JOF_IC) \
/*
* Push the value of an intrinsic onto the stack.
*
@@ -3040,7 +3054,7 @@
* Operands: uint32_t nameIndex
* Stack: => intrinsic[name]
*/ \
- MACRO(GetIntrinsic, get_intrinsic, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_IC) \
+ MACRO(GetIntrinsic, get_intrinsic, NULL, 5, 0, 1, JOF_ATOM|JOF_IC) \
/*
* Pushes the currently executing function onto the stack.
*
@@ -3100,7 +3114,7 @@
* Operands: uint32_t nameIndex
* Stack: env, val => val
*/ \
- MACRO(SetName, set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC|JOF_USES_ENV) \
+ MACRO(SetName, set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSLOPPY|JOF_IC|JOF_USES_ENV) \
/*
* Like `JSOp::SetName`, but throw a TypeError if there is no binding for
* the specified name in `env`, or if the binding is immutable (a `const`
@@ -3115,7 +3129,7 @@
* Operands: uint32_t nameIndex
* Stack: env, val => val
*/ \
- MACRO(StrictSetName, strict_set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC|JOF_USES_ENV) \
+ MACRO(StrictSetName, strict_set_name, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_CHECKSTRICT|JOF_IC|JOF_USES_ENV) \
/*
* Like `JSOp::SetName`, but for assigning to globals. `env` must be an
* environment pushed by `JSOp::BindGName`.
@@ -3125,7 +3139,7 @@
* Operands: uint32_t nameIndex
* Stack: env, val => val
*/ \
- MACRO(SetGName, set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_GNAME|JOF_CHECKSLOPPY|JOF_IC) \
+ MACRO(SetGName, set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_GNAME|JOF_CHECKSLOPPY|JOF_IC) \
/*
 * Like `JSOp::StrictSetName`, but for assigning to globals. `env` must be
* an environment pushed by `JSOp::BindGName`.
@@ -3135,7 +3149,7 @@
* Operands: uint32_t nameIndex
* Stack: env, val => val
*/ \
- MACRO(StrictSetGName, strict_set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_NAME|JOF_PROPSET|JOF_GNAME|JOF_CHECKSTRICT|JOF_IC) \
+ MACRO(StrictSetGName, strict_set_g_name, NULL, 5, 2, 1, JOF_ATOM|JOF_PROPSET|JOF_GNAME|JOF_CHECKSTRICT|JOF_IC) \
/*
* Assign `val` to an argument binding that's stored in the stack frame or
* in an `ArgumentsObject`.
@@ -3145,7 +3159,7 @@
* Operands: uint16_t argno
* Stack: val => val
*/ \
- MACRO(SetArg, set_arg, NULL, 3, 1, 1, JOF_QARG|JOF_NAME) \
+ MACRO(SetArg, set_arg, NULL, 3, 1, 1, JOF_QARG) \
/*
* Assign to an optimized local binding.
*
@@ -3154,7 +3168,7 @@
* Operands: uint24_t localno
* Stack: v => v
*/ \
- MACRO(SetLocal, set_local, NULL, 4, 1, 1, JOF_LOCAL|JOF_NAME) \
+ MACRO(SetLocal, set_local, NULL, 4, 1, 1, JOF_LOCAL) \
/*
* Assign to an aliased binding.
*
@@ -3169,7 +3183,7 @@
* Operands: uint8_t hops, uint24_t slot
* Stack: val => val
*/ \
- MACRO(SetAliasedVar, set_aliased_var, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_NAME|JOF_PROPSET|JOF_USES_ENV) \
+ MACRO(SetAliasedVar, set_aliased_var, NULL, 5, 1, 1, JOF_ENVCOORD|JOF_PROPSET|JOF_USES_ENV) \
/*
* Assign to an intrinsic.
*
@@ -3183,7 +3197,7 @@
* Operands: uint32_t nameIndex
* Stack: val => val
*/ \
- MACRO(SetIntrinsic, set_intrinsic, NULL, 5, 1, 1, JOF_ATOM|JOF_NAME) \
+ MACRO(SetIntrinsic, set_intrinsic, NULL, 5, 1, 1, JOF_ATOM) \
/*
* Push a lexical environment onto the environment chain.
*
@@ -3429,7 +3443,7 @@
* Operands: uint32_t nameIndex
* Stack: => succeeded
*/ \
- MACRO(DelName, del_name, NULL, 5, 0, 1, JOF_ATOM|JOF_NAME|JOF_CHECKSLOPPY|JOF_USES_ENV) \
+ MACRO(DelName, del_name, NULL, 5, 0, 1, JOF_ATOM|JOF_CHECKSLOPPY|JOF_USES_ENV) \
/*
* Create and push the `arguments` object for the current function activation.
*
@@ -3635,14 +3649,13 @@
* a power of two. Use this macro to do so.
*/
#define FOR_EACH_TRAILING_UNUSED_OPCODE(MACRO) \
- IF_RECORD_TUPLE(/* empty */, MACRO(235)) \
IF_RECORD_TUPLE(/* empty */, MACRO(236)) \
IF_RECORD_TUPLE(/* empty */, MACRO(237)) \
IF_RECORD_TUPLE(/* empty */, MACRO(238)) \
IF_RECORD_TUPLE(/* empty */, MACRO(239)) \
IF_RECORD_TUPLE(/* empty */, MACRO(240)) \
IF_RECORD_TUPLE(/* empty */, MACRO(241)) \
- MACRO(242) \
+ IF_RECORD_TUPLE(/* empty */, MACRO(242)) \
MACRO(243) \
MACRO(244) \
MACRO(245) \
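The unused-opcode list above shrinks by one slot in both configurations, consistent with the new `JSOp::TypeofEq` (documented alongside `JSOp::GetName` above) taking an opcode. Its single uint8 operand packs the expected type together with the comparison sense; here is a minimal sketch of that packing, assuming the negation lives in the operand's high bit as the new `vm/TypeofEqOperand.h` in the diffstat suggests (the exact layout and names are assumptions):

#include <cstdint>

// Hedged sketch of TypeofEqOperand; bit layout is an assumption.
enum class JSTypeSketch : uint8_t { Undefined, Object, Function, String };
enum class CompareKind : uint8_t { Eq, Ne };

struct TypeofEqOperandSketch {
  static constexpr uint8_t NE_BIT = 0x80;  // assumed: high bit means !==
  uint8_t raw;

  static TypeofEqOperandSketch fromRawValue(uint8_t v) { return {v}; }
  JSTypeSketch type() const { return static_cast<JSTypeSketch>(raw & ~NE_BIT); }
  CompareKind compareOp() const {
    return (raw & NE_BIT) ? CompareKind::Ne : CompareKind::Eq;
  }
};

With this layout `typeof x === "function"` and `typeof x !== "function"` compile to the same opcode and differ in one operand bit, which is why the interpreter case below can evaluate the type check once and simply negate the result when `compareOp()` reports `Ne`.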
diff --git a/js/src/vm/PortableBaselineInterpret.cpp b/js/src/vm/PortableBaselineInterpret.cpp
index 2588f12009..17310a325d 100644
--- a/js/src/vm/PortableBaselineInterpret.cpp
+++ b/js/src/vm/PortableBaselineInterpret.cpp
@@ -47,6 +47,7 @@
#include "vm/Opcodes.h"
#include "vm/PlainObject.h"
#include "vm/Shape.h"
+#include "vm/TypeofEqOperand.h" // TypeofEqOperand
#include "debugger/DebugAPI-inl.h"
#include "jit/BaselineFrame-inl.h"
@@ -1262,9 +1263,8 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE(GuardFunctionHasJitEntry) {
ObjOperandId funId = icregs.cacheIRReader.objOperandId();
- bool constructing = icregs.cacheIRReader.readBool();
JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
- uint16_t flags = FunctionFlags::HasJitEntryFlags(constructing);
+ uint16_t flags = FunctionFlags::HasJitEntryFlags();
if (!fun->as<JSFunction>().flags().hasFlags(flags)) {
return ICInterpretOpResult::NextIC;
}
@@ -1274,7 +1274,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE(GuardFunctionHasNoJitEntry) {
ObjOperandId funId = icregs.cacheIRReader.objOperandId();
JSObject* fun = reinterpret_cast<JSObject*>(icregs.icVals[funId.id()]);
- uint16_t flags = FunctionFlags::HasJitEntryFlags(/*constructing =*/false);
+ uint16_t flags = FunctionFlags::HasJitEntryFlags();
if (fun->as<JSFunction>().flags().hasFlags(flags)) {
return ICInterpretOpResult::NextIC;
}
@@ -1714,6 +1714,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
v = Int32Value(rhs);
break;
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
v = Value::fromRawBits(rhs);
@@ -2690,6 +2691,7 @@ ICInterpretOps(BaselineFrame* frame, VMFrameManager& frameMgr, State& state,
CACHEOP_CASE_UNIMPL(LoadConstantString)
CACHEOP_CASE_UNIMPL(LoadInstanceOfObjectResult)
CACHEOP_CASE_UNIMPL(LoadTypeOfObjectResult)
+ CACHEOP_CASE_UNIMPL(LoadTypeOfEqObjectResult)
CACHEOP_CASE_UNIMPL(DoubleAddResult)
CACHEOP_CASE_UNIMPL(DoubleSubResult)
CACHEOP_CASE_UNIMPL(DoubleMulResult)
@@ -2876,6 +2878,14 @@ DEFINE_IC(Typeof, 1, {
}
});
+DEFINE_IC(TypeofEq, 1, {
+ IC_LOAD_VAL(value0, 0);
+ PUSH_FALLBACK_IC_FRAME();
+ if (!DoTypeOfEqFallback(cx, frame, fallback, value0, &state.res)) {
+ goto error;
+ }
+});
+
DEFINE_IC(GetName, 1, {
IC_LOAD_OBJ(obj0, 0);
PUSH_FALLBACK_IC_FRAME();
@@ -3423,6 +3433,23 @@ PBIResult PortableBaselineInterpret(JSContext* cx_, State& state, Stack& stack,
END_OP(Typeof);
}
+ CASE(TypeofEq) {
+ if (kHybridICs) {
+ TypeofEqOperand operand = TypeofEqOperand::fromRawValue(GET_UINT8(pc));
+ bool result = js::TypeOfValue(Stack::handle(sp)) == operand.type();
+ if (operand.compareOp() == JSOp::Ne) {
+ result = !result;
+ }
+ sp[0] = StackVal(BooleanValue(result));
+ NEXT_IC();
+ } else {
+ IC_POP_ARG(0);
+ INVOKE_IC(TypeofEq);
+ IC_PUSH_RESULT();
+ }
+ END_OP(TypeofEq);
+ }
+
CASE(Pos) {
if (sp[0].asValue().isNumber()) {
// Nothing!
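Both guard cases above now share the parameterless `FunctionFlags::HasJitEntryFlags()`. A sketch of the guard pattern, with assumed flag names and any-bit semantics (the real definitions are in vm/FunctionFlags.h, which this commit also touches):

#include <cstdint>

// Assumed flag bits, for illustration only.
constexpr uint16_t BASESCRIPT = 1 << 0;
constexpr uint16_t SELFHOSTLAZY = 1 << 1;
constexpr uint16_t WASM_JIT_ENTRY = 1 << 2;

// After this change the mask no longer depends on whether the call site
// is constructing.
constexpr uint16_t HasJitEntryFlags() {
  return BASESCRIPT | SELFHOSTLAZY | WASM_JIT_ENTRY;
}

// GuardFunctionHasJitEntry falls through to the next IC when no bit is
// set; GuardFunctionHasNoJitEntry when any bit is set.
constexpr bool hasAnyJitEntryFlag(uint16_t funFlags) {
  return (funFlags & HasJitEntryFlags()) != 0;
}

static_assert(hasAnyJitEntryFlag(BASESCRIPT));
static_assert(!hasAnyJitEntryFlag(0));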
diff --git a/js/src/vm/PropMap.cpp b/js/src/vm/PropMap.cpp
index 8c1acaeea8..9fd6fbe3eb 100644
--- a/js/src/vm/PropMap.cpp
+++ b/js/src/vm/PropMap.cpp
@@ -966,18 +966,20 @@ void PropMapTable::trace(JSTracer* trc) {
}
#ifdef JSGC_HASH_TABLE_CHECKS
-void PropMapTable::checkAfterMovingGC() {
- for (Set::Enum e(set_); !e.empty(); e.popFront()) {
- PropMap* map = e.front().map();
+void PropMapTable::checkAfterMovingGC(JS::Zone* zone) {
+ CheckTableAfterMovingGC(set_, [zone](const auto& entry) {
+ PropMap* map = entry.map();
MOZ_ASSERT(map);
- CheckGCThingAfterMovingGC(map);
+ CheckGCThingAfterMovingGC(map, zone);
- PropertyKey key = map->getKey(e.front().index());
+ PropertyKey key = map->getKey(entry.index());
MOZ_RELEASE_ASSERT(!key.isVoid());
+ if (key.isGCThing()) {
+ CheckGCThingAfterMovingGC(key.toGCThing(), zone);
+ }
- auto p = lookupRaw(key);
- MOZ_RELEASE_ASSERT(p.found() && *p == e.front());
- }
+ return key;
+ });
}
#endif
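The hand-rolled verification loop becomes a call to a generic helper plus a per-entry lambda that validates the entry and returns its lookup key. Judging from this call site and the ones in ShapeZone.cpp below, the helper's contract is roughly the following sketch (the real implementation ships with the GC's hash-table checks; its exact shape here is an assumption):

#include "mozilla/Assertions.h"  // MOZ_RELEASE_ASSERT

// Sketch: iterate a table, let the callback check each entry and produce a
// lookup key, then assert the entry is still discoverable under that key.
template <typename Table, typename GetLookup>
static void CheckTableAfterMovingGCSketch(Table& table, GetLookup getLookup) {
  for (auto r = table.all(); !r.empty(); r.popFront()) {
    auto lookup = getLookup(r.front());
    auto ptr = table.lookup(lookup);
    MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
  }
}

This is the same check the removed loops performed by hand: every GC thing reachable from an entry has been moved out of the nursery, and rehashing under the reconstructed key still finds the identical entry.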
diff --git a/js/src/vm/PropMap.h b/js/src/vm/PropMap.h
index 17792cdfe7..139bba442b 100644
--- a/js/src/vm/PropMap.h
+++ b/js/src/vm/PropMap.h
@@ -409,7 +409,7 @@ class PropMapTable {
void trace(JSTracer* trc);
#ifdef JSGC_HASH_TABLE_CHECKS
- void checkAfterMovingGC();
+ void checkAfterMovingGC(JS::Zone* zone);
#endif
};
diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
index 5170b072fb..940ca72fa0 100644
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -73,7 +73,8 @@
#include "vm/Compression.h"
#include "vm/DateObject.h"
#include "vm/ErrorReporting.h" // js::MaybePrintAndClearPendingException
-#include "vm/FrameIter.h" // js::ScriptFrameIter
+#include "vm/Float16.h"
+#include "vm/FrameIter.h" // js::ScriptFrameIter
#include "vm/GeneratorObject.h"
#include "vm/Interpreter.h"
#include "vm/Iteration.h"
@@ -1025,6 +1026,8 @@ static bool intrinsic_GetTypedArrayKind(JSContext* cx, unsigned argc,
"TYPEDARRAY_KIND_BIGINT64 doesn't match the scalar type");
static_assert(TYPEDARRAY_KIND_BIGUINT64 == Scalar::Type::BigUint64,
"TYPEDARRAY_KIND_BIGUINT64 doesn't match the scalar type");
+ static_assert(TYPEDARRAY_KIND_FLOAT16 == Scalar::Type::Float16,
+ "TYPEDARRAY_KIND_FLOAT16 doesn't match the scalar type");
JSObject* obj = &args[0].toObject();
Scalar::Type type = JS_GetArrayBufferViewType(obj);
@@ -1235,6 +1238,9 @@ static bool IsTypedArrayBitwiseSlice(Scalar::Type sourceType,
case Scalar::Uint32:
return targetType == Scalar::Int32 || targetType == Scalar::Uint32;
+ case Scalar::Float16:
+ return targetType == Scalar::Float16;
+
case Scalar::Float32:
return targetType == Scalar::Float32;
@@ -2204,7 +2210,6 @@ static const JSFunctionSpec intrinsic_functions[] = {
JS_INLINABLE_FN("TypedArrayLengthZeroOnOutOfBounds",
intrinsic_TypedArrayLengthZeroOnOutOfBounds, 1, 0,
IntrinsicTypedArrayLengthZeroOnOutOfBounds),
- JS_FN("TypedArrayNativeSort", intrinsic_TypedArrayNativeSort, 1, 0),
JS_INLINABLE_FN("UnsafeGetInt32FromReservedSlot",
intrinsic_UnsafeGetInt32FromReservedSlot, 2, 0,
IntrinsicUnsafeGetInt32FromReservedSlot),
@@ -2379,6 +2384,8 @@ static const JSFunctionSpec intrinsic_functions[] = {
#ifdef ENABLE_RECORD_TUPLE
JS_FN("std_Tuple_unchecked", tuple_construct, 1, 0),
#endif
+ JS_TRAMPOLINE_FN("std_TypedArray_sort", TypedArrayObject::sort, 1, 0,
+ TypedArraySort),
JS_FS_END};
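The new `Scalar::Float16` case in `IsTypedArrayBitwiseSlice` only pairs Float16 with itself, unlike the integer cases, which pair same-width signed and unsigned types. The reason falls out of conversion semantics: for same-width integers the conversion JavaScript requires is modular and therefore identical to a bit-for-bit copy, while for floats it is numeric. A standalone illustration (not SpiderMonkey code):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // Int32 -> Uint32: ToUint32(-1) is 4294967295, exactly the bit pattern
  // of int32_t(-1), so a bitwise slice preserves observable values.
  int32_t i = -1;
  uint32_t u;
  std::memcpy(&u, &i, sizeof(u));
  assert(u == 4294967295u);

  // Float16 -> Int16 would not: binary16 1.0 has bit pattern 0x3C00, but
  // the required numeric conversion ToInt16(1.0) yields 1, so only a
  // Float16 target is bitwise-compatible with a Float16 source.
  uint16_t bitsOfOne = 0x3C00;
  assert(bitsOfOne != 1);
  return 0;
}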
diff --git a/js/src/vm/ShapeZone.cpp b/js/src/vm/ShapeZone.cpp
index 760fde2e69..840426de61 100644
--- a/js/src/vm/ShapeZone.cpp
+++ b/js/src/vm/ShapeZone.cpp
@@ -12,6 +12,7 @@
#include "vm/Shape-inl.h"
using namespace js;
+using namespace js::gc;
void ShapeZone::fixupPropMapShapeTableAfterMovingGC() {
for (PropMapShapeSet::Enum e(propMapShapes); !e.empty(); e.popFront()) {
@@ -27,73 +28,65 @@ void ShapeZone::fixupPropMapShapeTableAfterMovingGC() {
}
#ifdef JSGC_HASH_TABLE_CHECKS
-void ShapeZone::checkTablesAfterMovingGC() {
- // Assert that the moving GC worked and that nothing is left in the tables
- // that points into the nursery, and that the hash table entries are
- // discoverable.
-
- for (auto r = initialPropMaps.all(); !r.empty(); r.popFront()) {
- SharedPropMap* map = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(map);
-
- InitialPropMapHasher::Lookup lookup(map->getKey(0),
- map->getPropertyInfo(0));
- InitialPropMapSet::Ptr ptr = initialPropMaps.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
-
- for (auto r = baseShapes.all(); !r.empty(); r.popFront()) {
- BaseShape* base = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(base);
-
- BaseShapeHasher::Lookup lookup(base->clasp(), base->realm(), base->proto());
- BaseShapeSet::Ptr ptr = baseShapes.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
-
- for (auto r = initialShapes.all(); !r.empty(); r.popFront()) {
- SharedShape* shape = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(shape);
-
- using Lookup = InitialShapeHasher::Lookup;
- Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
- shape->numFixedSlots(), shape->objectFlags());
- InitialShapeSet::Ptr ptr = initialShapes.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
-
- for (auto r = propMapShapes.all(); !r.empty(); r.popFront()) {
- SharedShape* shape = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(shape);
-
- using Lookup = PropMapShapeHasher::Lookup;
- Lookup lookup(shape->base(), shape->numFixedSlots(), shape->propMap(),
- shape->propMapLength(), shape->objectFlags());
- PropMapShapeSet::Ptr ptr = propMapShapes.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
-
- for (auto r = proxyShapes.all(); !r.empty(); r.popFront()) {
- ProxyShape* shape = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(shape);
-
- using Lookup = ProxyShapeHasher::Lookup;
- Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
- shape->objectFlags());
- ProxyShapeSet::Ptr ptr = proxyShapes.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
-
- for (auto r = wasmGCShapes.all(); !r.empty(); r.popFront()) {
- WasmGCShape* shape = r.front().unbarrieredGet();
- CheckGCThingAfterMovingGC(shape);
-
- using Lookup = WasmGCShapeHasher::Lookup;
- Lookup lookup(shape->getObjectClass(), shape->realm(), shape->proto(),
- shape->recGroup(), shape->objectFlags());
- WasmGCShapeSet::Ptr ptr = wasmGCShapes.lookup(lookup);
- MOZ_RELEASE_ASSERT(ptr.found() && &*ptr == &r.front());
- }
+void ShapeZone::checkTablesAfterMovingGC(JS::Zone* zone) {
+ CheckTableAfterMovingGC(initialPropMaps, [zone](const auto& entry) {
+ SharedPropMap* map = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(map, zone);
+ PropertyKey key = map->getKey(0);
+ if (key.isGCThing()) {
+ CheckGCThingAfterMovingGC(key.toGCThing(), zone);
+ }
+
+ return InitialPropMapHasher::Lookup(key, map->getPropertyInfo(0));
+ });
+
+ CheckTableAfterMovingGC(baseShapes, [zone](const auto& entry) {
+ BaseShape* base = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(base, zone);
+ CheckProtoAfterMovingGC(base->proto(), zone);
+
+ return BaseShapeHasher::Lookup(base->clasp(), base->realm(), base->proto());
+ });
+
+ CheckTableAfterMovingGC(initialShapes, [zone](const auto& entry) {
+ SharedShape* shape = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape, zone);
+ CheckProtoAfterMovingGC(shape->proto(), zone);
+
+ return InitialShapeHasher::Lookup(shape->getObjectClass(), shape->realm(),
+ shape->proto(), shape->numFixedSlots(),
+ shape->objectFlags());
+ });
+
+ CheckTableAfterMovingGC(propMapShapes, [zone](const auto& entry) {
+ SharedShape* shape = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape, zone);
+ CheckGCThingAfterMovingGC(shape->base(), zone);
+ CheckGCThingAfterMovingGC(shape->propMap(), zone);
+
+ return PropMapShapeHasher::Lookup(shape->base(), shape->numFixedSlots(),
+ shape->propMap(), shape->propMapLength(),
+ shape->objectFlags());
+ });
+
+ CheckTableAfterMovingGC(proxyShapes, [zone](const auto& entry) {
+ ProxyShape* shape = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape, zone);
+ CheckProtoAfterMovingGC(shape->proto(), zone);
+
+ return ProxyShapeHasher::Lookup(shape->getObjectClass(), shape->realm(),
+ shape->proto(), shape->objectFlags());
+ });
+
+ CheckTableAfterMovingGC(wasmGCShapes, [zone](const auto& entry) {
+ WasmGCShape* shape = entry.unbarrieredGet();
+ CheckGCThingAfterMovingGC(shape, zone);
+ CheckProtoAfterMovingGC(shape->proto(), zone);
+
+ return WasmGCShapeHasher::Lookup(shape->getObjectClass(), shape->realm(),
+ shape->proto(), shape->recGroup(),
+ shape->objectFlags());
+ });
}
#endif // JSGC_HASH_TABLE_CHECKS
diff --git a/js/src/vm/ShapeZone.h b/js/src/vm/ShapeZone.h
index 784465a395..9680af93d9 100644
--- a/js/src/vm/ShapeZone.h
+++ b/js/src/vm/ShapeZone.h
@@ -236,7 +236,7 @@ struct ShapeZone {
void fixupPropMapShapeTableAfterMovingGC();
#ifdef JSGC_HASH_TABLE_CHECKS
- void checkTablesAfterMovingGC();
+ void checkTablesAfterMovingGC(JS::Zone* zone);
#endif
};
diff --git a/js/src/vm/SharedArrayObject.cpp b/js/src/vm/SharedArrayObject.cpp
index e3e25e3de5..cf31660692 100644
--- a/js/src/vm/SharedArrayObject.cpp
+++ b/js/src/vm/SharedArrayObject.cpp
@@ -71,6 +71,23 @@ SharedArrayRawBuffer* SharedArrayRawBuffer::Allocate(bool isGrowable,
if (!p) {
return nullptr;
}
+ MOZ_ASSERT(reinterpret_cast<uintptr_t>(p) %
+ ArrayBufferObject::ARRAY_BUFFER_ALIGNMENT ==
+ 0,
+ "shared array buffer memory is aligned");
+
+  // jemalloc "tiny" allocations can be less strictly aligned than ordinary
+  // std::malloc allocations. Ensure shared array buffer allocations are
+  // always large enough to avoid that special case.
+ static_assert(sizeof(SharedArrayRawBuffer) > sizeof(void*),
+ "SharedArrayRawBuffer doesn't fit in jemalloc tiny allocation");
+
+ static_assert(sizeof(SharedArrayRawBuffer) %
+ ArrayBufferObject::ARRAY_BUFFER_ALIGNMENT ==
+ 0,
+ "sizeof(SharedArrayRawBuffer) is a multiple of the array "
+ "buffer alignment, so |p + sizeof(SharedArrayRawBuffer)| is "
+ "also array buffer aligned");
uint8_t* buffer = p + sizeof(SharedArrayRawBuffer);
return new (p) SharedArrayRawBuffer(isGrowable, buffer, length);
@@ -587,6 +604,7 @@ SharedArrayBufferType* SharedArrayBufferObject::NewWith(
bool SharedArrayBufferObject::acceptRawBuffer(SharedArrayRawBuffer* buffer,
size_t length) {
+ MOZ_ASSERT(!isInitialized());
if (!zone()->addSharedMemory(buffer,
SharedArrayMappedSize(buffer->isWasm(), length),
MemoryUse::SharedArrayRawBuffer)) {
@@ -595,6 +613,7 @@ bool SharedArrayBufferObject::acceptRawBuffer(SharedArrayRawBuffer* buffer,
setFixedSlot(RAWBUF_SLOT, PrivateValue(buffer));
setFixedSlot(LENGTH_SLOT, PrivateValue(length));
+ MOZ_ASSERT(isInitialized());
return true;
}
@@ -605,6 +624,7 @@ void SharedArrayBufferObject::dropRawBuffer() {
MemoryUse::SharedArrayRawBuffer);
rawBufferObject()->dropReference();
setFixedSlot(RAWBUF_SLOT, UndefinedValue());
+ MOZ_ASSERT(!isInitialized());
}
SharedArrayRawBuffer* SharedArrayBufferObject::rawBufferObject() const {
@@ -639,6 +659,11 @@ void SharedArrayBufferObject::addSizeOfExcludingThis(
// the refcount goes down). But that's unlikely and hard to avoid, so we
// just live with the risk.
const SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();
+
+ if (MOZ_UNLIKELY(!buf.isInitialized())) {
+ return;
+ }
+
size_t nbytes = buf.byteLengthOrMaxByteLength();
size_t owned = nbytes / buf.rawBufferObject()->refcount();
if (buf.isWasm()) {
@@ -683,6 +708,7 @@ SharedArrayBufferObject* SharedArrayBufferObject::createFromNewRawBuffer(
if (!obj->acceptRawBuffer(buffer, initialSize)) {
buffer->dropReference();
+ js::ReportOutOfMemory(cx);
return nullptr;
}
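The pair of static_asserts added to `Allocate` encode a small alignment argument: if `p` is array-buffer aligned and the header size is a multiple of that alignment, the data pointer `p + sizeof(SharedArrayRawBuffer)` is aligned too. A standalone restatement with stand-in sizes (both constants are assumptions for illustration):

#include <cstddef>
#include <cstdint>

constexpr size_t kAlignment = 8;    // stand-in for ARRAY_BUFFER_ALIGNMENT
constexpr size_t kHeaderSize = 32;  // stand-in for sizeof(SharedArrayRawBuffer)
static_assert(kHeaderSize % kAlignment == 0,
              "header size is a multiple of the alignment");

// An aligned header start implies an aligned buffer start.
constexpr bool impliesAlignedBuffer(uintptr_t p) {
  return (p % kAlignment != 0) || ((p + kHeaderSize) % kAlignment == 0);
}
static_assert(impliesAlignedBuffer(kAlignment * 123));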
diff --git a/js/src/vm/SharedArrayObject.h b/js/src/vm/SharedArrayObject.h
index 525ee78451..e3d14254d4 100644
--- a/js/src/vm/SharedArrayObject.h
+++ b/js/src/vm/SharedArrayObject.h
@@ -344,6 +344,13 @@ class SharedArrayBufferObject : public ArrayBufferObjectMaybeShared {
return rawBufferObject()->volatileByteLength();
}
+ private:
+ bool isInitialized() const {
+ bool initialized = getFixedSlot(RAWBUF_SLOT).isDouble();
+ MOZ_ASSERT_IF(initialized, getFixedSlot(LENGTH_SLOT).isDouble());
+ return initialized;
+ }
+
public:
 // Returns the byte length for fixed-length shared arrays, or the maximum
 // byte length for growable shared arrays.
diff --git a/js/src/vm/StringType-inl.h b/js/src/vm/StringType-inl.h
index 4548e5d7ea..5c600531a1 100644
--- a/js/src/vm/StringType-inl.h
+++ b/js/src/vm/StringType-inl.h
@@ -376,6 +376,7 @@ inline JSDependentString::JSDependentString(JSLinearString* base, size_t start,
setLengthAndFlags(length, INIT_DEPENDENT_FLAGS);
d.s.u2.nonInlineCharsTwoByte = base->twoByteChars(nogc) + start;
}
+ base->setDependedOn();
d.s.u3.base = base;
if (isTenured() && !base->isTenured()) {
base->storeBuffer()->putWholeCell(this);
diff --git a/js/src/vm/StringType.cpp b/js/src/vm/StringType.cpp
index b735b91b71..b6bd22e3d4 100644
--- a/js/src/vm/StringType.cpp
+++ b/js/src/vm/StringType.cpp
@@ -359,7 +359,7 @@ const char* RepresentationToString(const JSString* s) {
template <typename KnownF, typename UnknownF>
void ForEachStringFlag(const JSString* str, uint32_t flags, KnownF known,
UnknownF unknown) {
- for (uint32_t i = js::Bit(3); i < js::Bit(16); i = i << 1) {
+ for (uint32_t i = js::Bit(3); i < js::Bit(17); i = i << 1) {
if (!(flags & i)) {
continue;
}
@@ -406,7 +406,11 @@ void ForEachStringFlag(const JSString* str, uint32_t flags, KnownF known,
known("LATIN1_CHARS_BIT");
break;
case JSString::ATOM_IS_INDEX_BIT:
- known("ATOM_IS_INDEX_BIT");
+ if (str->isAtom()) {
+ known("ATOM_IS_INDEX_BIT");
+ } else {
+ known("ATOM_REF_BIT");
+ }
break;
case JSString::INDEX_VALUE_BIT:
known("INDEX_VALUE_BIT");
@@ -418,7 +422,7 @@ void ForEachStringFlag(const JSString* str, uint32_t flags, KnownF known,
if (str->isRope()) {
known("FLATTEN_VISIT_RIGHT");
} else {
- known("NON_DEDUP_BIT");
+ known("DEPENDED_ON_BIT");
}
break;
case JSString::FLATTEN_FINISH_NODE:
@@ -429,7 +433,7 @@ void ForEachStringFlag(const JSString* str, uint32_t flags, KnownF known,
} else if (str->isAtom()) {
known("PINNED_ATOM_BIT");
} else {
- unknown(i);
+ known("NON_DEDUP_BIT");
}
break;
default:
@@ -936,6 +940,7 @@ JSLinearString* JSRope::flattenInternal(JSRope* root) {
const size_t wholeLength = root->length();
size_t wholeCapacity;
CharT* wholeChars;
+ uint32_t newRootFlags = 0;
AutoCheckCannotGC nogc;
@@ -1041,6 +1046,7 @@ finish_node: {
StringFlagsForCharType<CharT>(INIT_DEPENDENT_FLAGS));
str->d.s.u3.base =
reinterpret_cast<JSLinearString*>(root); /* will be true on exit */
+ newRootFlags |= DEPENDED_ON_BIT;
// Every interior (rope) node in the rope's tree will be visited during
// the traversal and post-barriered here, so earlier additions of
@@ -1079,10 +1085,24 @@ finish_root:
JSString& left = *leftmostChild;
RemoveCellMemory(&left, left.allocSize(), MemoryUse::StringContents);
+ // Inherit NON_DEDUP_BIT from the leftmost string.
+ newRootFlags |= left.flags() & NON_DEDUP_BIT;
+
+ // Set root's DEPENDED_ON_BIT because the leftmost string is now a
+ // dependent.
+ newRootFlags |= DEPENDED_ON_BIT;
+
uint32_t flags = INIT_DEPENDENT_FLAGS;
if (left.inStringToAtomCache()) {
flags |= IN_STRING_TO_ATOM_CACHE;
}
+ // If left was depended on, we need to make sure we preserve that. Even
+ // though the string that depended on left's buffer will now depend on
+ // root's buffer, if left is the only edge to root, replacing left with an
+ // atom ref would break that edge and allow root's buffer to be freed.
+ if (left.isDependedOn()) {
+ flags |= DEPENDED_ON_BIT;
+ }
left.setLengthAndFlags(left.length(), StringFlagsForCharType<CharT>(flags));
left.d.s.u3.base = &root->asLinear();
if (left.isTenured() && !root->isTenured()) {
@@ -1091,10 +1111,12 @@ finish_root:
// being freed (because the leftmost child may have a tenured dependent
// string that cannot be updated.)
root->storeBuffer()->putWholeCell(&left);
- root->setNonDeduplicatable();
+ newRootFlags |= NON_DEDUP_BIT;
}
}
+ root->setHeaderFlagBit(newRootFlags);
+
return &root->asLinear();
}
@@ -1477,18 +1499,17 @@ uint32_t JSAtom::getIndexSlow() const {
: AtomCharsToIndex(twoByteChars(nogc), len);
}
-// Prevent the actual owner of the string's characters from being deduplicated
-// (and thus freeing its characters, which would invalidate the ASSC's chars
-// pointer). Intermediate dependent strings on the chain can be deduplicated,
-// since the base will be updated to the root base during tenuring anyway and
-// the intermediates won't matter.
-void PreventRootBaseDeduplication(JSLinearString* s) {
+// Ensure that the incoming s.chars pointer is stable, i.e. that it cannot
+// change even across a GC. That requires that the string that owns the chars
+// be neither collected nor deduplicated.
+void AutoStableStringChars::holdStableChars(JSLinearString* s) {
while (s->hasBase()) {
s = s->base();
}
if (!s->isTenured()) {
s->setNonDeduplicatable();
}
+ s_ = s;
}
bool AutoStableStringChars::init(JSContext* cx, JSString* s) {
@@ -1498,6 +1519,7 @@ bool AutoStableStringChars::init(JSContext* cx, JSString* s) {
}
MOZ_ASSERT(state_ == Uninitialized);
+ length_ = linearString->length();
// Inline and nursery-allocated chars may move during a GC, so copy them
// out into a temporary malloced buffer. Note that we cannot update the
@@ -1516,9 +1538,7 @@ bool AutoStableStringChars::init(JSContext* cx, JSString* s) {
twoByteChars_ = linearString->rawTwoByteChars();
}
- PreventRootBaseDeduplication(linearString);
-
- s_ = linearString;
+ holdStableChars(linearString);
return true;
}
@@ -1529,6 +1549,7 @@ bool AutoStableStringChars::initTwoByte(JSContext* cx, JSString* s) {
}
MOZ_ASSERT(state_ == Uninitialized);
+ length_ = linearString->length();
if (linearString->hasLatin1Chars()) {
return copyAndInflateLatin1Chars(cx, linearString);
@@ -1542,9 +1563,7 @@ bool AutoStableStringChars::initTwoByte(JSContext* cx, JSString* s) {
state_ = TwoByte;
twoByteChars_ = linearString->rawTwoByteChars();
- PreventRootBaseDeduplication(linearString);
-
- s_ = linearString;
+ holdStableChars(linearString);
return true;
}
@@ -1574,16 +1593,18 @@ T* AutoStableStringChars::allocOwnChars(JSContext* cx, size_t count) {
bool AutoStableStringChars::copyAndInflateLatin1Chars(
JSContext* cx, Handle<JSLinearString*> linearString) {
- size_t length = linearString->length();
- char16_t* chars = allocOwnChars<char16_t>(cx, length);
+ MOZ_ASSERT(state_ == Uninitialized);
+ MOZ_ASSERT(s_ == nullptr);
+
+ char16_t* chars = allocOwnChars<char16_t>(cx, length_);
if (!chars) {
return false;
}
// Copy |src[0..length]| to |dest[0..length]| when copying doesn't narrow and
// therefore can't lose information.
- auto src = AsChars(Span(linearString->rawLatin1Chars(), length));
- auto dest = Span(chars, length);
+ auto src = AsChars(Span(linearString->rawLatin1Chars(), length_));
+ auto dest = Span(chars, length_);
ConvertLatin1toUtf16(src, dest);
state_ = TwoByte;
@@ -1594,13 +1615,15 @@ bool AutoStableStringChars::copyAndInflateLatin1Chars(
bool AutoStableStringChars::copyLatin1Chars(
JSContext* cx, Handle<JSLinearString*> linearString) {
- size_t length = linearString->length();
- JS::Latin1Char* chars = allocOwnChars<JS::Latin1Char>(cx, length);
+ MOZ_ASSERT(state_ == Uninitialized);
+ MOZ_ASSERT(s_ == nullptr);
+
+ JS::Latin1Char* chars = allocOwnChars<JS::Latin1Char>(cx, length_);
if (!chars) {
return false;
}
- PodCopy(chars, linearString->rawLatin1Chars(), length);
+ PodCopy(chars, linearString->rawLatin1Chars(), length_);
state_ = Latin1;
latin1Chars_ = chars;
@@ -1610,13 +1633,15 @@ bool AutoStableStringChars::copyLatin1Chars(
bool AutoStableStringChars::copyTwoByteChars(
JSContext* cx, Handle<JSLinearString*> linearString) {
- size_t length = linearString->length();
- char16_t* chars = allocOwnChars<char16_t>(cx, length);
+ MOZ_ASSERT(state_ == Uninitialized);
+ MOZ_ASSERT(s_ == nullptr);
+
+ char16_t* chars = allocOwnChars<char16_t>(cx, length_);
if (!chars) {
return false;
}
- PodCopy(chars, linearString->rawTwoByteChars(), length);
+ PodCopy(chars, linearString->rawTwoByteChars(), length_);
state_ = TwoByte;
twoByteChars_ = chars;
@@ -2501,6 +2526,55 @@ bool JSString::fillWithRepresentatives(JSContext* cx,
return true;
}
+bool JSString::tryReplaceWithAtomRef(JSAtom* atom) {
+ MOZ_ASSERT(!isAtomRef());
+
+ if (isDependedOn() || isInline() || isExternal()) {
+ return false;
+ }
+
+ AutoCheckCannotGC nogc;
+ if (hasOutOfLineChars()) {
+ void* buffer = asLinear().nonInlineCharsRaw();
+ // This is a little cheeky and so deserves a comment. If the string is
+ // not tenured, then either its buffer lives purely in the nursery, in
+ // which case it will just be forgotten and blown away in the next
+ // minor GC, or it is tracked in the nursery's mallocedBuffers hashtable,
+ // in which case it will be freed for us in the next minor GC. We opt
+ // to let the GC take care of it since there's a chance it will run
+ // during idle time.
+ if (isTenured()) {
+ RemoveCellMemory(this, allocSize(), MemoryUse::StringContents);
+ js_free(buffer);
+ }
+ }
+
+ // Pre-barrier for d.s.u3 which is overwritten and d.s.u2 which is ignored
+ // for atom refs.
+ MOZ_ASSERT(isRope() || isLinear());
+ if (isRope()) {
+ PreWriteBarrier(d.s.u2.left);
+ PreWriteBarrier(d.s.u3.right);
+ } else if (isDependent()) {
+ PreWriteBarrier(d.s.u3.base);
+ }
+
+ uint32_t flags = INIT_ATOM_REF_FLAGS;
+ d.s.u3.atom = atom;
+ if (atom->hasLatin1Chars()) {
+ flags |= LATIN1_CHARS_BIT;
+ setLengthAndFlags(length(), flags);
+ setNonInlineChars(atom->chars<Latin1Char>(nogc));
+ } else {
+ setLengthAndFlags(length(), flags);
+ setNonInlineChars(atom->chars<char16_t>(nogc));
+ }
+  // Redundant, but a reminder that this must hold; otherwise we would need
+  // to check and conditionally put this string in the store buffer.
+ MOZ_ASSERT(atom->isTenured());
+ return true;
+}
+
/*** Conversions ************************************************************/
UniqueChars js::EncodeLatin1(JSContext* cx, JSString* str) {
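`tryReplaceWithAtomRef` above is the mutation half of the new atom-ref strings: a string that has just been atomized can be rewritten in place to borrow the atom's chars, letting its own buffer be freed (or collected by the nursery). A hedged sketch of the call shape; the lookup helper named here is hypothetical, and only `tryReplaceWithAtomRef` itself is from this patch:

// Hypothetical caller, somewhere in the atomization path.
JSAtom* atom = LookupOrInternAtom(cx, str);  // hypothetical helper
if (atom && !str->isAtom()) {
  // May refuse: inline, external, and depended-on strings keep their chars.
  (void)str->tryReplaceWithAtomRef(atom);
}

The refusal cases match the guard at the top of the function: a string whose buffer other dependent strings point at (`isDependedOn()`) must keep that buffer alive, so it cannot be retargeted at the atom.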
diff --git a/js/src/vm/StringType.h b/js/src/vm/StringType.h
index 38dea85c60..4073c45c70 100644
--- a/js/src/vm/StringType.h
+++ b/js/src/vm/StringType.h
@@ -147,6 +147,8 @@ bool CheckStringIsIndex(const CharT* s, size_t length, uint32_t* indexp);
* JSLinearString latin1Chars, twoByteChars / -
* |
* +-- JSDependentString base / -
+ * | |
+ * | +-- JSAtomRefString - / base points to an atom
* |
* +-- JSExternalString - / char array memory managed by embedding
* |
@@ -275,6 +277,7 @@ class JSString : public js::gc::CellWithLengthAndFlags {
} u2;
union {
JSLinearString* base; /* JSDependentString */
+ JSAtom* atom; /* JSAtomRefString */
JSString* right; /* JSRope */
size_t capacity; /* JSLinearString (extensible) */
const JSExternalStringCallbacks*
@@ -317,28 +320,30 @@ class JSString : public js::gc::CellWithLengthAndFlags {
* String Instance Subtype
* type encoding predicate
* -----------------------------------------
- * Rope 000000 000 xxxx0x xxx
- * Linear 000010 000 xxxx1x xxx
- * Dependent 000110 000 xxx1xx xxx
- * External 100010 000 100010 xxx
- * Extensible 010010 000 010010 xxx
- * Inline 001010 000 xx1xxx xxx
- * FatInline 011010 000 x11xxx xxx
- * JSAtom - xxxxx1 xxx
- * NormalAtom 000011 000 xx0xx1 xxx
- * PermanentAtom 100011 000 1xxxx1 xxx
- * ThinInlineAtom 001011 000 x01xx1 xxx
- * FatInlineAtom 011011 000 x11xx1 xxx
- * |||||| |||
- * |||||| ||\- [0] reserved (FORWARD_BIT)
- * |||||| |\-- [1] reserved
- * |||||| \--- [2] reserved
- * |||||\----- [3] IsAtom
- * ||||\------ [4] IsLinear
- * |||\------- [5] IsDependent
- * ||\-------- [6] IsInline
- * |\--------- [7] FatInlineAtom/Extensible
- * \---------- [8] External/Permanent
+ * Rope 0000000 000 xxxxx0x xxx
+ * Linear 0000010 000 xxxxx1x xxx
+ * Dependent 0000110 000 xxxx1xx xxx
+ * AtomRef 1000110 000 1xxxxxx xxx
+ * External 0100010 000 x100010 xxx
+ * Extensible 0010010 000 x010010 xxx
+ * Inline 0001010 000 xxx1xxx xxx
+ * FatInline 0011010 000 xx11xxx xxx
+ * JSAtom - xxxxxx1 xxx
+ * NormalAtom 0000011 000 xxx0xx1 xxx
+ * PermanentAtom 0100011 000 x1xxxx1 xxx
+ * ThinInlineAtom 0001011 000 xx01xx1 xxx
+ * FatInlineAtom 0011011 000 xx11xx1 xxx
+ * ||||||| |||
+ * ||||||| ||\- [0] reserved (FORWARD_BIT)
+ * ||||||| |\-- [1] reserved
+ * ||||||| \--- [2] reserved
+ * ||||||\----- [3] IsAtom
+ * |||||\------ [4] IsLinear
+ * ||||\------- [5] IsDependent
+ * |||\-------- [6] IsInline
+ * ||\--------- [7] FatInlineAtom/Extensible
+ * |\---------- [8] External/Permanent
+ * \----------- [9] AtomRef
*
* Bits 0..2 are reserved for use by the GC (see
* gc::CellFlagBitsReservedForGC). In particular, bit 0 is currently used for
@@ -364,6 +369,8 @@ class JSString : public js::gc::CellWithLengthAndFlags {
static const uint32_t LINEAR_BIT = js::Bit(4);
static const uint32_t DEPENDENT_BIT = js::Bit(5);
static const uint32_t INLINE_CHARS_BIT = js::Bit(6);
+ // Indicates a dependent string pointing to an atom
+ static const uint32_t ATOM_REF_BIT = js::Bit(9);
static const uint32_t LINEAR_IS_EXTENSIBLE_BIT = js::Bit(7);
static const uint32_t INLINE_IS_FAT_BIT = js::Bit(7);
@@ -383,11 +390,19 @@ class JSString : public js::gc::CellWithLengthAndFlags {
static const uint32_t INIT_ROPE_FLAGS = 0;
static const uint32_t INIT_LINEAR_FLAGS = LINEAR_BIT;
static const uint32_t INIT_DEPENDENT_FLAGS = LINEAR_BIT | DEPENDENT_BIT;
+ static const uint32_t INIT_ATOM_REF_FLAGS =
+ INIT_DEPENDENT_FLAGS | ATOM_REF_BIT;
- static const uint32_t TYPE_FLAGS_MASK = js::BitMask(9) - js::BitMask(3);
+ static const uint32_t TYPE_FLAGS_MASK = js::BitMask(10) - js::BitMask(3);
static_assert((TYPE_FLAGS_MASK & js::gc::HeaderWord::RESERVED_MASK) == 0,
"GC reserved bits must not be used for Strings");
+  // Whether this atom's characters store a uint32 index value less than or
+  // equal to MAX_ARRAY_INDEX. This bit means something different if the
+  // string is not an atom (see ATOM_REF_BIT).
+ // See JSLinearString::isIndex.
+ static const uint32_t ATOM_IS_INDEX_BIT = js::Bit(9);
+
// Linear strings:
// - Content and representation are Latin-1 characters.
// - Unmodifiable after construction.
@@ -397,12 +412,7 @@ class JSString : public js::gc::CellWithLengthAndFlags {
// - Flag may be cleared when the rope is changed into a dependent string.
//
// Also see LATIN1_CHARS_BIT description under "Flag Encoding".
- static const uint32_t LATIN1_CHARS_BIT = js::Bit(9);
-
- // Whether this atom's characters store an uint32 index value less than or
- // equal to MAX_ARRAY_INDEX. Not used for non-atomized strings.
- // See JSLinearString::isIndex.
- static const uint32_t ATOM_IS_INDEX_BIT = js::Bit(10);
+ static const uint32_t LATIN1_CHARS_BIT = js::Bit(10);
static const uint32_t INDEX_VALUE_BIT = js::Bit(11);
static const uint32_t INDEX_VALUE_SHIFT = 16;
@@ -424,6 +434,11 @@ class JSString : public js::gc::CellWithLengthAndFlags {
static const uint32_t FLATTEN_MASK =
FLATTEN_VISIT_RIGHT | FLATTEN_FINISH_NODE;
+ // Indicates that this string is depended on by another string. A rope should
+ // never be depended on, and this should never be set during flattening, so
+ // we can reuse the FLATTEN_VISIT_RIGHT bit.
+ static const uint32_t DEPENDED_ON_BIT = FLATTEN_VISIT_RIGHT;
+
static const uint32_t PINNED_ATOM_BIT = js::Bit(15);
static const uint32_t PERMANENT_ATOM_MASK =
ATOM_BIT | PINNED_ATOM_BIT | ATOM_IS_PERMANENT_BIT;
@@ -543,6 +558,35 @@ class JSString : public js::gc::CellWithLengthAndFlags {
return flags() >> INDEX_VALUE_SHIFT;
}
+ /*
+ * Whether any dependent strings point to this string's chars. This is needed
+ * so that we don't replace the string with a forwarded atom and free its
+ * buffer.
+ *
+ * NOTE: we specifically do not set this for atoms, because they are accessed
+ * on many threads and we don't want to mess with their flags if we don't
+ * have to, and it is safe because atoms will never be replaced by an atom
+ * ref.
+ */
+ bool isDependedOn() const {
+ bool result = flags() & DEPENDED_ON_BIT;
+ MOZ_ASSERT_IF(result, !isRope() && !isAtom());
+ return result;
+ }
+
+ bool assertIsValidBase() const {
+ // See isDependedOn comment for why we're excluding atoms
+ return isAtom() || isDependedOn();
+ }
+
+ void setDependedOn() {
+ MOZ_ASSERT(!isRope());
+ if (isAtom()) {
+ return;
+ }
+ setFlagBit(DEPENDED_ON_BIT);
+ }
+
inline size_t allocSize() const;
/* Fallible conversions to more-derived string types. */
@@ -573,6 +617,11 @@ class JSString : public js::gc::CellWithLengthAndFlags {
bool isDependent() const { return flags() & DEPENDENT_BIT; }
MOZ_ALWAYS_INLINE
+ bool isAtomRef() const {
+ return (flags() & ATOM_REF_BIT) && !(flags() & ATOM_BIT);
+ }
+
+ MOZ_ALWAYS_INLINE
JSDependentString& asDependent() const {
MOZ_ASSERT(isDependent());
return *(JSDependentString*)this;
@@ -668,6 +717,8 @@ class JSString : public js::gc::CellWithLengthAndFlags {
inline JSLinearString* base() const;
+ inline JSAtom* atom() const;
+
// The base may be forwarded and becomes a relocation overlay.
// The return value can be a relocation overlay when the base is forwarded,
// or the return value can be the actual base when it is not forwarded.
@@ -678,6 +729,8 @@ class JSString : public js::gc::CellWithLengthAndFlags {
// Only called by the GC during nursery collection.
inline void setBase(JSLinearString* newBase);
+ bool tryReplaceWithAtomRef(JSAtom* atom);
+
void traceBase(JSTracer* trc);
/* Only called by the GC for strings with the AllocKind::STRING kind. */
@@ -1182,6 +1235,20 @@ class JSDependentString : public JSLinearString {
static_assert(sizeof(JSDependentString) == sizeof(JSString),
"string subclasses must be binary-compatible with JSString");
+class JSAtomRefString : public JSDependentString {
+ friend class JSString;
+ friend class js::gc::CellAllocator;
+ friend class js::jit::MacroAssembler;
+
+ public:
+ inline static size_t offsetOfAtom() {
+ return offsetof(JSAtomRefString, d.s.u3.atom);
+ }
+};
+
+static_assert(sizeof(JSAtomRefString) == sizeof(JSString),
+ "string subclasses must be binary-compatible with JSString");
+
class JSExtensibleString : public JSLinearString {
/* Vacuous and therefore unimplemented. */
bool isExtensible() const = delete;
@@ -1737,7 +1804,9 @@ inline JSLinearString* NewStringCopy(
}
/* Copy a counted string and GC-allocate a descriptor for it. */
-template <js::AllowGC allowGC, typename CharT>
+template <
+ js::AllowGC allowGC, typename CharT,
+ typename std::enable_if_t<!std::is_same_v<CharT, unsigned char>>* = nullptr>
inline JSLinearString* NewStringCopy(
JSContext* cx, std::basic_string_view<CharT> s,
js::gc::Heap heap = js::gc::Heap::Default) {
@@ -2030,10 +2099,19 @@ MOZ_ALWAYS_INLINE JSLinearString* JSString::ensureLinear(JSContext* cx) {
inline JSLinearString* JSString::base() const {
MOZ_ASSERT(hasBase());
- MOZ_ASSERT(!d.s.u3.base->isInline());
+ MOZ_ASSERT_IF(!isAtomRef(), !d.s.u3.base->isInline());
+ MOZ_ASSERT(d.s.u3.base->assertIsValidBase());
+ if (isAtomRef()) {
+ return static_cast<JSLinearString*>(d.s.u3.atom);
+ }
return d.s.u3.base;
}
+inline JSAtom* JSString::atom() const {
+ MOZ_ASSERT(isAtomRef());
+ return d.s.u3.atom;
+}
+
inline JSLinearString* JSString::nurseryBaseOrRelocOverlay() const {
MOZ_ASSERT(hasBase());
return d.s.u3.base;
@@ -2184,7 +2262,9 @@ MOZ_ALWAYS_INLINE bool JSAtom::lengthFitsInline<char16_t>(size_t length) {
template <>
MOZ_ALWAYS_INLINE void JSString::setNonInlineChars(const char16_t* chars) {
// Check that the new buffer is located in the StringBufferArena
- checkStringCharsArena(chars);
+ if (!(isAtomRef() && atom()->isInline())) {
+ checkStringCharsArena(chars);
+ }
d.s.u2.nonInlineCharsTwoByte = chars;
}
@@ -2192,7 +2272,9 @@ template <>
MOZ_ALWAYS_INLINE void JSString::setNonInlineChars(
const JS::Latin1Char* chars) {
// Check that the new buffer is located in the StringBufferArena
- checkStringCharsArena(chars);
+ if (!(isAtomRef() && atom()->isInline())) {
+ checkStringCharsArena(chars);
+ }
d.s.u2.nonInlineCharsLatin1 = chars;
}
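As a cross-check of the widened flag table, the new AtomRef row follows directly from the constants above: an atom ref is a dependent linear string with `ATOM_REF_BIT` set. The arithmetic, with the low three GC-reserved bits shown separately:

#include <cstdint>

constexpr uint32_t LINEAR_BIT = 1u << 4;
constexpr uint32_t DEPENDENT_BIT = 1u << 5;
constexpr uint32_t ATOM_REF_BIT = 1u << 9;
constexpr uint32_t INIT_DEPENDENT_FLAGS = LINEAR_BIT | DEPENDENT_BIT;
constexpr uint32_t INIT_ATOM_REF_FLAGS = INIT_DEPENDENT_FLAGS | ATOM_REF_BIT;

// Matches the "AtomRef  1000110 000" row of the encoding table.
static_assert(INIT_ATOM_REF_FLAGS == 0b1000110'000);

Note also why `isAtomRef()` masks out `ATOM_BIT`: bit 9 doubles as `ATOM_IS_INDEX_BIT` on atoms, so the bit alone is ambiguous without also checking that the string is not an atom.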
diff --git a/js/src/vm/TypedArrayObject-inl.h b/js/src/vm/TypedArrayObject-inl.h
index ffb9a3c9f6..741ebe27fb 100644
--- a/js/src/vm/TypedArrayObject-inl.h
+++ b/js/src/vm/TypedArrayObject-inl.h
@@ -28,6 +28,7 @@
#include "util/Memory.h"
#include "vm/ArrayObject.h"
#include "vm/BigIntType.h"
+#include "vm/Float16.h"
#include "vm/NativeObject.h"
#include "vm/Uint8Clamped.h"
@@ -41,6 +42,68 @@ template <typename To, typename From>
inline To ConvertNumber(From src);
template <>
+inline int8_t ConvertNumber<int8_t, float16>(float16 src) {
+ return JS::ToInt8(src.toDouble());
+}
+
+template <>
+inline uint8_t ConvertNumber<uint8_t, float16>(float16 src) {
+ return JS::ToUint8(src.toDouble());
+}
+
+template <>
+inline uint8_clamped ConvertNumber<uint8_clamped, float16>(float16 src) {
+ return uint8_clamped(src.toDouble());
+}
+
+template <>
+inline float16 ConvertNumber<float16, float16>(float16 src) {
+ return src;
+}
+
+template <>
+inline int16_t ConvertNumber<int16_t, float16>(float16 src) {
+ return JS::ToInt16(src.toDouble());
+}
+
+template <>
+inline uint16_t ConvertNumber<uint16_t, float16>(float16 src) {
+ return JS::ToUint16(src.toDouble());
+}
+
+template <>
+inline int32_t ConvertNumber<int32_t, float16>(float16 src) {
+ return JS::ToInt32(src.toDouble());
+}
+
+template <>
+inline uint32_t ConvertNumber<uint32_t, float16>(float16 src) {
+ return JS::ToUint32(src.toDouble());
+}
+
+template <>
+inline int64_t ConvertNumber<int64_t, float16>(float16 src) {
+ return JS::ToInt64(src.toDouble());
+}
+
+template <>
+inline uint64_t ConvertNumber<uint64_t, float16>(float16 src) {
+ return JS::ToUint64(src.toDouble());
+}
+
+// Float16 is a bit of a special case in that it's floating point,
+// but std::is_floating_point_v doesn't know about it.
+template <>
+inline float ConvertNumber<float, float16>(float16 src) {
+ return static_cast<float>(src.toDouble());
+}
+
+template <>
+inline double ConvertNumber<double, float16>(float16 src) {
+ return src.toDouble();
+}
+
+template <>
inline int8_t ConvertNumber<int8_t, float>(float src) {
return JS::ToInt8(src);
}
@@ -56,6 +119,11 @@ inline uint8_clamped ConvertNumber<uint8_clamped, float>(float src) {
}
template <>
+inline float16 ConvertNumber<float16, float>(float src) {
+ return float16(src);
+}
+
+template <>
inline int16_t ConvertNumber<int16_t, float>(float src) {
return JS::ToInt16(src);
}
@@ -101,6 +169,11 @@ inline uint8_clamped ConvertNumber<uint8_clamped, double>(double src) {
}
template <>
+inline float16 ConvertNumber<float16, double>(double src) {
+ return float16(src);
+}
+
+template <>
inline int16_t ConvertNumber<int16_t, double>(double src) {
return JS::ToInt16(src);
}
@@ -183,6 +256,11 @@ struct TypeIDOfType<uint64_t> {
static const JSProtoKey protoKey = JSProto_BigUint64Array;
};
template <>
+struct TypeIDOfType<float16> {
+ static const Scalar::Type id = Scalar::Float16;
+ static const JSProtoKey protoKey = JSProto_Float16Array;
+};
+template <>
struct TypeIDOfType<float> {
static const Scalar::Type id = Scalar::Float32;
static const JSProtoKey protoKey = JSProto_Float32Array;
@@ -309,11 +387,25 @@ class ElementSpecific {
MOZ_ASSERT(offset <= targetLength);
MOZ_ASSERT(sourceLength <= targetLength - offset);
+ // Return early when copying no elements.
+ //
+  // Note: `SharedMem::cast` asserts the memory is properly aligned. Memory
+  // for non-zero lengths is correctly aligned (statically asserted below),
+  // but the pointer for a zero-length range can have a different alignment,
+  // so we have to return early.
+ if (sourceLength == 0) {
+ return true;
+ }
+
if (TypedArrayObject::sameBuffer(target, source)) {
return setFromOverlappingTypedArray(target, targetLength, source,
sourceLength, offset);
}
+ // `malloc` returns memory at least as strictly aligned as for max_align_t
+ // and the alignment of max_align_t is a multiple of the size of `T`,
+ // so `SharedMem::cast` will be called with properly aligned memory.
+ static_assert(alignof(std::max_align_t) % sizeof(T) == 0);
+
SharedMem<T*> dest =
target->dataPointerEither().template cast<T*>() + offset;
size_t count = sourceLength;
@@ -383,6 +475,13 @@ class ElementSpecific {
}
break;
}
+ case Scalar::Float16: {
+ SharedMem<float16*> src = data.cast<float16*>();
+ for (size_t i = 0; i < count; ++i) {
+ Ops::store(dest++, ConvertNumber<T>(Ops::load(src++)));
+ }
+ break;
+ }
case Scalar::Float32: {
SharedMem<float*> src = data.cast<float*>();
for (size_t i = 0; i < count; ++i) {
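The float16 specializations of `ConvertNumber` exist because, as the comment above notes, `std::is_floating_point_v` is false for the software `js::float16` type, so the generic float paths never see it. One way to express the combined category, mirroring the `std::is_floating_point_v<T> || std::is_same_v<T, float16>` test this patch uses in the sort code (the trait name and the stub type are assumptions):

#include <cstdint>
#include <type_traits>

namespace js { struct float16 { uint16_t bits; }; }  // stand-in for vm/Float16.h

template <typename T>
inline constexpr bool is_floating_point_like_v =
    std::is_floating_point_v<T> || std::is_same_v<T, js::float16>;

static_assert(is_floating_point_like_v<float>);
static_assert(is_floating_point_like_v<double>);
static_assert(is_floating_point_like_v<js::float16>);
static_assert(!is_floating_point_like_v<int16_t>);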
diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
index 935a902abe..1594bf7e30 100644
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -12,6 +12,7 @@
#include "mozilla/IntegerTypeTraits.h"
#include "mozilla/Likely.h"
#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
#include "mozilla/TextUtils.h"
#include <algorithm>
@@ -33,6 +34,7 @@
#include "gc/Barrier.h"
#include "gc/MaybeRooted.h"
#include "jit/InlinableNatives.h"
+#include "jit/TrampolineNatives.h"
#include "js/Conversions.h"
#include "js/experimental/TypedData.h" // JS_GetArrayBufferViewType, JS_GetTypedArray{Length,ByteOffset,ByteLength}, JS_IsTypedArrayObject
#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_*
@@ -45,8 +47,10 @@
#include "util/Text.h"
#include "util/WindowsWrapper.h"
#include "vm/ArrayBufferObject.h"
+#include "vm/Float16.h"
#include "vm/FunctionFlags.h" // js::FunctionFlags
#include "vm/GlobalObject.h"
+#include "vm/Interpreter.h"
#include "vm/JSContext.h"
#include "vm/JSObject.h"
#include "vm/PIC.h"
@@ -55,6 +59,7 @@
#include "vm/Uint8Clamped.h"
#include "vm/WrapperObject.h"
+#include "builtin/Sorting-inl.h"
#include "gc/Nursery-inl.h"
#include "vm/ArrayBufferObject-inl.h"
#include "vm/Compartment-inl.h"
@@ -94,6 +99,7 @@ bool TypedArrayObject::convertValue(JSContext* cx, HandleValue v,
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
+ case Scalar::Float16:
case Scalar::Float32:
case Scalar::Float64:
case Scalar::Uint8Clamped: {
@@ -3153,7 +3159,7 @@ static bool uint8array_toHex(JSContext* cx, unsigned argc, Value* vp) {
JS_SELF_HOSTED_FN("reverse", "TypedArrayReverse", 0, 0),
JS_SELF_HOSTED_FN("slice", "TypedArraySlice", 2, 0),
JS_SELF_HOSTED_FN("some", "TypedArraySome", 1, 0),
- JS_SELF_HOSTED_FN("sort", "TypedArraySort", 1, 0),
+ JS_TRAMPOLINE_FN("sort", TypedArrayObject::sort, 1, 0, TypedArraySort),
JS_SELF_HOSTED_FN("entries", "TypedArrayEntries", 0, 0),
JS_SELF_HOSTED_FN("keys", "TypedArrayKeys", 0, 0),
JS_SELF_HOSTED_FN("values", "$TypedArrayValues", 0, 0),
@@ -3236,6 +3242,25 @@ bool TypedArrayObjectTemplate<uint32_t>::getElementPure(
}
template <>
+bool TypedArrayObjectTemplate<float16>::getElementPure(TypedArrayObject* tarray,
+ size_t index,
+ Value* vp) {
+ float16 f16 = getIndex(tarray, index);
+ /*
+ * Float16 values in typed arrays could be type-punned arrays of integers.
+ * This could allow user code to break the engine-wide invariant that only
+ * canonical NaNs are stored into jsvals, which means user code could
+ * confuse the engine into interpreting a double-typed jsval as an
+ * object-typed jsval.
+ *
+ * This could be removed for platforms/compilers known to convert a 16-bit
+ * non-canonical NaN to a 64-bit canonical NaN.
+ */
+ *vp = JS::CanonicalizedDoubleValue(f16.toDouble());
+ return true;
+}
+
+template <>
bool TypedArrayObjectTemplate<float>::getElementPure(TypedArrayObject* tarray,
size_t index, Value* vp) {
float val = getIndex(tarray, index);
@@ -3373,8 +3398,8 @@ bool TypedArrayObject::getElementPure(size_t index, Value* vp) {
/* static */
bool TypedArrayObject::getElements(JSContext* cx,
Handle<TypedArrayObject*> tarray,
- Value* vp) {
- size_t length = tarray->length().valueOr(0);
+ size_t length, Value* vp) {
+ MOZ_ASSERT(length <= tarray->length().valueOr(0));
MOZ_ASSERT_IF(length > 0, !tarray->hasDetachedBuffer());
switch (tarray->type()) {
@@ -3934,6 +3959,26 @@ static constexpr
return val ^ FloatingPoint::kSignBit;
}
+template <typename T, typename UnsignedT>
+static constexpr
+ typename std::enable_if_t<std::is_same_v<T, float16>, UnsignedT>
+ UnsignedSortValue(UnsignedT val) {
+ // Flip sign bit for positive numbers; flip all bits for negative numbers,
+ // except negative NaNs.
+
+ // 0xFC00 is negative infinity and (0xFC00, 0xFFFF] are all NaNs with the
+ // sign bit set, so any value larger (as unsigned) than negative infinity
+ // is a negative NaN.
+ constexpr UnsignedT NegativeInfinity = 0xFC00;
+ if (val > NegativeInfinity) {
+ return val;
+ }
+ if (val & 0x8000) {
+ return ~val;
+ }
+ return val ^ 0x8000;
+}
+
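To see why plain unsigned comparison on these keys orders half-precision values correctly, here is a self-contained sketch (not part of the patch; HalfSortKey is a hypothetical stand-in for the specialization above):

  #include <cstdint>

  constexpr uint16_t HalfSortKey(uint16_t v) {
    if (v > 0xFC00) return v;             // negative NaN: already at the top
    if (v & 0x8000) return uint16_t(~v);  // negative: flip all bits
    return uint16_t(v ^ 0x8000);          // positive: flip the sign bit
  }

  static_assert(HalfSortKey(0xFC00) < HalfSortKey(0xBC00));  // -inf < -1.0
  static_assert(HalfSortKey(0xBC00) < HalfSortKey(0x8000));  // -1.0 < -0.0
  static_assert(HalfSortKey(0x8000) < HalfSortKey(0x0000));  // -0.0 < +0.0
  static_assert(HalfSortKey(0x0000) < HalfSortKey(0x3C00));  // +0.0 < +1.0
  static_assert(HalfSortKey(0x3C00) < HalfSortKey(0x7C00));  // +1.0 < +inf
  static_assert(HalfSortKey(0x7C00) < HalfSortKey(0x7E00));  // +inf < +NaN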
template <typename T>
static typename std::enable_if_t<std::is_integral_v<T> ||
std::is_same_v<T, uint8_clamped>>
@@ -3943,8 +3988,9 @@ TypedArrayStdSort(SharedMem<void*> data, size_t length) {
}
template <typename T>
-static typename std::enable_if_t<std::is_floating_point_v<T>> TypedArrayStdSort(
- SharedMem<void*> data, size_t length) {
+static typename std::enable_if_t<std::is_floating_point_v<T> ||
+ std::is_same_v<T, float16>>
+TypedArrayStdSort(SharedMem<void*> data, size_t length) {
// Sort on the unsigned representation for performance reasons.
using UnsignedT =
typename mozilla::UnsignedStdintTypeForSize<sizeof(T)>::Type;
@@ -4159,7 +4205,13 @@ template <typename T, typename Ops>
static constexpr typename std::enable_if_t<sizeof(T) == 2 || sizeof(T) == 4,
TypedArraySortFn>
TypedArraySort() {
- return TypedArrayRadixSort<T, Ops>;
+ if constexpr (std::is_same_v<T, float16>) {
+ // TODO: Support radix sort for Float16, see
+ // https://bugzilla.mozilla.org/show_bug.cgi?id=1893229
+ return TypedArrayStdSort<T, Ops>;
+ } else {
+ return TypedArrayRadixSort<T, Ops>;
+ }
}
template <typename T, typename Ops>
@@ -4168,45 +4220,261 @@ TypedArraySort() {
return TypedArrayStdSort<T, Ops>;
}
-bool js::intrinsic_TypedArrayNativeSort(JSContext* cx, unsigned argc,
- Value* vp) {
- CallArgs args = CallArgsFromVp(argc, vp);
- MOZ_ASSERT(args.length() == 1);
-
- TypedArrayObject* typedArray =
- UnwrapAndDowncastValue<TypedArrayObject>(cx, args[0]);
- if (!typedArray) {
- return false;
- }
-
- auto length = typedArray->length();
- MOZ_RELEASE_ASSERT(length,
- "TypedArray is neither detached nor out-of-bounds");
-
+static bool TypedArraySortWithoutComparator(JSContext* cx,
+ TypedArrayObject* typedArray,
+ size_t len) {
bool isShared = typedArray->isSharedMemory();
switch (typedArray->type()) {
-#define SORT(_, T, N) \
- case Scalar::N: \
- if (isShared) { \
- if (!TypedArraySort<T, SharedOps>()(cx, typedArray, *length)) { \
- return false; \
- } \
- } else { \
- if (!TypedArraySort<T, UnsharedOps>()(cx, typedArray, *length)) { \
- return false; \
- } \
- } \
+#define SORT(_, T, N) \
+ case Scalar::N: \
+ if (isShared) { \
+ if (!TypedArraySort<T, SharedOps>()(cx, typedArray, len)) { \
+ return false; \
+ } \
+ } else { \
+ if (!TypedArraySort<T, UnsharedOps>()(cx, typedArray, len)) { \
+ return false; \
+ } \
+ } \
break;
JS_FOR_EACH_TYPED_ARRAY(SORT)
#undef SORT
default:
MOZ_CRASH("Unsupported TypedArray type");
}
+ return true;
+}
+
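For reference, the SORT entry above is expanded once per element type by the JS_FOR_EACH_TYPED_ARRAY X-macro; for Int32 the generated case is roughly:

  case Scalar::Int32:
    if (isShared) {
      if (!TypedArraySort<int32_t, SharedOps>()(cx, typedArray, len)) {
        return false;
      }
    } else {
      if (!TypedArraySort<int32_t, UnsharedOps>()(cx, typedArray, len)) {
        return false;
      }
    }
    break;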
+static MOZ_ALWAYS_INLINE bool TypedArraySortPrologue(JSContext* cx,
+ Handle<Value> thisv,
+ Handle<Value> comparefn,
+ ArraySortData* d,
+ bool* done) {
+ // https://tc39.es/ecma262/#sec-%typedarray%.prototype.sort
+ // 23.2.3.29 %TypedArray%.prototype.sort ( comparefn )
+
+ // Step 1.
+ if (MOZ_UNLIKELY(!comparefn.isUndefined() && !IsCallable(comparefn))) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+ JSMSG_BAD_TYPEDARRAY_SORT_ARG);
+ return false;
+ }
+
+ // Steps 2-3.
+ Rooted<TypedArrayObject*> tarrayUnwrapped(
+ cx, UnwrapAndTypeCheckValue<TypedArrayObject>(cx, thisv, [cx, &thisv]() {
+ JS_ReportErrorNumberUTF8(cx, GetErrorMessage, nullptr,
+ JSMSG_INCOMPATIBLE_METHOD, "sort", "method",
+ InformalValueTypeName(thisv));
+ }));
+ if (!tarrayUnwrapped) {
+ return false;
+ }
+ auto arrayLength = tarrayUnwrapped->length();
+ if (!arrayLength) {
+ ReportOutOfBounds(cx, tarrayUnwrapped);
+ return false;
+ }
- args.rval().set(args[0]);
+ // Step 4.
+ size_t len = *arrayLength;
+
+ // Arrays with fewer than two elements remain unchanged when sorted.
+ if (len <= 1) {
+ d->setReturnValue(&thisv.toObject());
+ *done = true;
+ return true;
+ }
+
+ // Fast path for sorting without a comparator.
+ if (comparefn.isUndefined()) {
+ if (!TypedArraySortWithoutComparator(cx, tarrayUnwrapped, len)) {
+ return false;
+ }
+ d->setReturnValue(&thisv.toObject());
+ *done = true;
+ return true;
+ }
+
+ // Ensure length * 2 (used below) doesn't overflow UINT32_MAX.
+ if (MOZ_UNLIKELY(len > UINT32_MAX / 2)) {
+ ReportAllocationOverflow(cx);
+ return false;
+ }
+
+ // Merge sort requires extra scratch space.
+ bool needsScratchSpace = len > ArraySortData::InsertionSortMaxLength;
+
+ Rooted<ArraySortData::ValueVector> vec(cx);
+ if (MOZ_UNLIKELY(!vec.resize(needsScratchSpace ? (2 * len) : len))) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ // Copy elements to JS Value vector.
+ if (!TypedArrayObject::getElements(cx, tarrayUnwrapped, len, vec.begin())) {
+ return false;
+ }
+
+ d->init(&thisv.toObject(), &comparefn.toObject(), std::move(vec.get()), len,
+ len);
+
+ // Continue in ArraySortData::sortTypedArrayWithComparator.
+ MOZ_ASSERT(!*done);
return true;
}
+// Copies sorted elements back to the typed array.
+template <typename T, typename Ops>
+static void StoreSortedElements(TypedArrayObject* tarray, Value* elements,
+ size_t len) {
+ SharedMem<T*> data = tarray->dataPointerEither().cast<T*>();
+ for (size_t i = 0; i < len; i++) {
+ T val;
+ if constexpr (TypeIsFloatingPoint<T>()) {
+ val = elements[i].toDouble();
+ } else if constexpr (std::is_same_v<T, int64_t>) {
+ val = BigInt::toInt64(elements[i].toBigInt());
+ } else if constexpr (std::is_same_v<T, uint64_t>) {
+ val = BigInt::toUint64(elements[i].toBigInt());
+ } else if constexpr (std::is_same_v<T, uint32_t>) {
+ val = uint32_t(elements[i].toNumber());
+ } else {
+ val = elements[i].toInt32();
+ }
+ Ops::store(data + i, val);
+ }
+}
+
+// static
+ArraySortResult ArraySortData::sortTypedArrayWithComparator(ArraySortData* d) {
+ ArraySortResult result =
+ sortWithComparatorShared<ArraySortKind::TypedArray>(d);
+ if (result != ArraySortResult::Done) {
+ return result;
+ }
+
+ // Copy sorted elements to the typed array.
+ JSContext* cx = d->cx();
+ Rooted<TypedArrayObject*> tarrayUnwrapped(
+ cx, UnwrapAndDowncastObject<TypedArrayObject>(cx, d->obj_));
+ if (MOZ_UNLIKELY(!tarrayUnwrapped)) {
+ return ArraySortResult::Failure;
+ }
+
+ auto length = tarrayUnwrapped->length();
+ if (MOZ_LIKELY(length)) {
+ size_t len = std::min<size_t>(*length, d->denseLen);
+ Value* elements = d->list;
+ bool isShared = tarrayUnwrapped->isSharedMemory();
+ switch (tarrayUnwrapped->type()) {
+#define SORT(_, T, N) \
+ case Scalar::N: \
+ if (isShared) { \
+ StoreSortedElements<T, SharedOps>(tarrayUnwrapped, elements, len); \
+ } else { \
+ StoreSortedElements<T, UnsharedOps>(tarrayUnwrapped, elements, len); \
+ } \
+ break;
+ JS_FOR_EACH_TYPED_ARRAY(SORT)
+#undef SORT
+ default:
+ MOZ_CRASH("Unsupported TypedArray type");
+ }
+ }
+
+ d->freeMallocData();
+ d->setReturnValue(d->obj_);
+ return ArraySortResult::Done;
+}
+
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.sort
+// 23.2.3.29 %TypedArray%.prototype.sort ( comparefn )
+// static
+bool TypedArrayObject::sort(JSContext* cx, unsigned argc, Value* vp) {
+ AutoJSMethodProfilerEntry pseudoFrame(cx, "[TypedArray].prototype", "sort");
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // If we have a comparator argument, use the JIT trampoline implementation
+ // instead. This avoids a performance cliff (especially with large arrays)
+ // because C++ => JIT calls are much slower than Trampoline => JIT calls.
+ if (args.hasDefined(0) && jit::IsBaselineInterpreterEnabled()) {
+ return CallTrampolineNativeJitCode(
+ cx, jit::TrampolineNative::TypedArraySort, args);
+ }
+
+ Rooted<ArraySortData> data(cx, cx);
+
+ // On all return paths other than ArraySortData::sortTypedArrayWithComparator
+ // returning Done, we call freeMallocData so that debug assertions don't
+ // fail. This matches the JIT trampoline, where we can't rely on C++
+ // destructors.
+ auto freeData =
+ mozilla::MakeScopeExit([&]() { data.get().freeMallocData(); });
+
+ bool done = false;
+ if (!TypedArraySortPrologue(cx, args.thisv(), args.get(0), data.address(),
+ &done)) {
+ return false;
+ }
+ if (done) {
+ args.rval().set(data.get().returnValue());
+ return true;
+ }
+
+ FixedInvokeArgs<2> callArgs(cx);
+ Rooted<Value> rval(cx);
+
+ while (true) {
+ ArraySortResult res =
+ ArraySortData::sortTypedArrayWithComparator(data.address());
+ switch (res) {
+ case ArraySortResult::Failure:
+ return false;
+
+ case ArraySortResult::Done:
+ freeData.release();
+ args.rval().set(data.get().returnValue());
+ return true;
+
+ case ArraySortResult::CallJS:
+ case ArraySortResult::CallJSSameRealmNoRectifier:
+ MOZ_ASSERT(data.get().comparatorThisValue().isUndefined());
+ MOZ_ASSERT(&args[0].toObject() == data.get().comparator());
+ callArgs[0].set(data.get().comparatorArg(0));
+ callArgs[1].set(data.get().comparatorArg(1));
+ if (!js::Call(cx, args[0], UndefinedHandleValue, callArgs, &rval)) {
+ return false;
+ }
+ data.get().setComparatorReturnValue(rval);
+ break;
+ }
+ }
+}
+
+ArraySortResult js::TypedArraySortFromJit(
+ JSContext* cx, jit::TrampolineNativeFrameLayout* frame) {
+ // Initialize the ArraySortData class stored in the trampoline frame.
+ void* dataUninit = frame->getFrameData<ArraySortData>();
+ auto* data = new (dataUninit) ArraySortData(cx);
+
+ Rooted<Value> thisv(cx, frame->thisv());
+ Rooted<Value> comparefn(cx);
+ if (frame->numActualArgs() > 0) {
+ comparefn = frame->actualArgs()[0];
+ }
+
+ bool done = false;
+ if (!TypedArraySortPrologue(cx, thisv, comparefn, data, &done)) {
+ return ArraySortResult::Failure;
+ }
+ if (done) {
+ data->freeMallocData();
+ return ArraySortResult::Done;
+ }
+
+ return ArraySortData::sortTypedArrayWithComparator(data);
+}
+
/* JS Public API */
#define IMPL_TYPED_ARRAY_JSAPI_CONSTRUCTORS(ExternalType, NativeType, Name) \
diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
index 6905e83600..a768e4a579 100644
--- a/js/src/vm/TypedArrayObject.h
+++ b/js/src/vm/TypedArrayObject.h
@@ -22,6 +22,12 @@
namespace js {
+enum class ArraySortResult : uint32_t;
+
+namespace jit {
+class TrampolineNativeFrameLayout;
+}
+
/*
* TypedArrayObject
*
@@ -101,11 +107,11 @@ class TypedArrayObject : public ArrayBufferViewObject {
bool getElementPure(size_t index, Value* vp);
/*
- * Copy all elements from this typed array to vp. vp must point to rooted
- * memory.
+ * Copy |length| elements from this typed array to vp. vp must point to rooted
+ * memory. |length| must not exceed the typed array's current length.
*/
static bool getElements(JSContext* cx, Handle<TypedArrayObject*> tarray,
- Value* vp);
+ size_t length, Value* vp);
static bool GetTemplateObjectForNative(JSContext* cx, Native native,
const JS::HandleValueArray args,
@@ -131,6 +137,7 @@ class TypedArrayObject : public ArrayBufferViewObject {
static bool set(JSContext* cx, unsigned argc, Value* vp);
static bool copyWithin(JSContext* cx, unsigned argc, Value* vp);
+ static bool sort(JSContext* cx, unsigned argc, Value* vp);
bool convertValue(JSContext* cx, HandleValue v,
MutableHandleValue result) const;
@@ -307,10 +314,6 @@ bool DefineTypedArrayElement(JSContext* cx, Handle<TypedArrayObject*> obj,
uint64_t index, Handle<PropertyDescriptor> desc,
ObjectOpResult& result);
-// Sort a typed array in ascending order. The typed array may be wrapped, but
-// must not be detached.
-bool intrinsic_TypedArrayNativeSort(JSContext* cx, unsigned argc, Value* vp);
-
static inline constexpr unsigned TypedArrayShift(Scalar::Type viewType) {
switch (viewType) {
case Scalar::Int8:
@@ -319,6 +322,7 @@ static inline constexpr unsigned TypedArrayShift(Scalar::Type viewType) {
return 0;
case Scalar::Int16:
case Scalar::Uint16:
+ case Scalar::Float16:
return 1;
case Scalar::Int32:
case Scalar::Uint32:
@@ -338,6 +342,9 @@ static inline constexpr unsigned TypedArrayElemSize(Scalar::Type viewType) {
return 1u << TypedArrayShift(viewType);
}
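Both helpers are constexpr, so the new Float16 entries can be sanity-checked with a standalone sketch (assumed check, not part of the patch):

  static_assert(js::TypedArrayShift(js::Scalar::Float16) == 1);
  static_assert(js::TypedArrayElemSize(js::Scalar::Float16) == 2);  // 2 bytes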
+extern ArraySortResult TypedArraySortFromJit(
+ JSContext* cx, jit::TrampolineNativeFrameLayout* frame);
+
} // namespace js
template <>
diff --git a/js/src/vm/TypeofEqOperand.h b/js/src/vm/TypeofEqOperand.h
new file mode 100644
index 0000000000..b3236baed6
--- /dev/null
+++ b/js/src/vm/TypeofEqOperand.h
@@ -0,0 +1,50 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef vm_TypeofEqOperand_h
+#define vm_TypeofEqOperand_h
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+
+#include <stdint.h> // uint8_t
+
+#include "jspubtd.h" // JSType
+#include "vm/Opcodes.h" // JSOp
+
+namespace js {
+
+struct TypeofEqOperand {
+ static constexpr uint8_t TYPE_MASK = 0x0f;
+ static constexpr uint8_t NEQ_BIT = 0x80;
+
+ private:
+ uint8_t value;
+
+ static uint8_t toNeqBit(JSOp compareOp) {
+ MOZ_ASSERT(compareOp == JSOp::Eq || compareOp == JSOp::Ne);
+ return compareOp == JSOp::Ne ? NEQ_BIT : 0;
+ }
+
+ explicit TypeofEqOperand(uint8_t value) : value(value) {}
+
+ public:
+ TypeofEqOperand(JSType type, JSOp compareOp)
+ : value(type | toNeqBit(compareOp)) {}
+
+ static TypeofEqOperand fromRawValue(uint8_t value) {
+ return TypeofEqOperand(value);
+ }
+
+ JSType type() const { return JSType(value & TYPE_MASK); }
+ JSOp compareOp() const { return (value & NEQ_BIT) ? JSOp::Ne : JSOp::Eq; }
+ uint8_t rawValue() const { return value; }
+};
+
+static_assert((JSTYPE_LIMIT & TypeofEqOperand::TYPE_MASK) == JSTYPE_LIMIT);
+
+} // namespace js
+
+#endif // vm_TypeofEqOperand_h
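A short usage sketch for the new encoding (hypothetical, not part of the patch): the JSType occupies the low four bits, bit 7 records a not-equals comparison, and the value round-trips through the raw operand byte:

  js::TypeofEqOperand op(JSTYPE_STRING, js::JSOp::Ne);
  uint8_t raw = op.rawValue();  // the single operand byte in the bytecode
  auto decoded = js::TypeofEqOperand::fromRawValue(raw);
  MOZ_ASSERT(decoded.type() == JSTYPE_STRING);
  MOZ_ASSERT(decoded.compareOp() == js::JSOp::Ne);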
diff --git a/js/src/vm/Uint8Clamped.h b/js/src/vm/Uint8Clamped.h
index 5cc391f4b7..07a1e5627e 100644
--- a/js/src/vm/Uint8Clamped.h
+++ b/js/src/vm/Uint8Clamped.h
@@ -9,6 +9,8 @@
#include <stdint.h>
+#include "vm/Float16.h"
+
namespace js {
extern uint32_t ClampDoubleToUint8(const double x);
@@ -91,6 +93,10 @@ inline constexpr bool TypeIsFloatingPoint() {
return false;
}
template <>
+inline constexpr bool TypeIsFloatingPoint<float16>() {
+ return true;
+}
+template <>
inline constexpr bool TypeIsFloatingPoint<float>() {
return true;
}
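A closing sanity sketch for the trait (assumed check, not part of the patch): all three floating-point element types are classified together, while integer-like types are not:

  static_assert(js::TypeIsFloatingPoint<js::float16>());
  static_assert(js::TypeIsFloatingPoint<float>());
  static_assert(js::TypeIsFloatingPoint<double>());
  static_assert(!js::TypeIsFloatingPoint<js::uint8_clamped>());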