From 36d22d82aa202bb199967e9512281e9a53db42c9 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 21:33:14 +0200 Subject: Adding upstream version 115.7.0esr. Signed-off-by: Daniel Baumann --- js/src/vm/Activation-inl.h | 172 + js/src/vm/Activation.cpp | 84 + js/src/vm/Activation.h | 565 +++ js/src/vm/ArgumentsObject-inl.h | 58 + js/src/vm/ArgumentsObject.cpp | 1182 ++++++ js/src/vm/ArgumentsObject.h | 566 +++ js/src/vm/ArrayBufferObject-inl.h | 57 + js/src/vm/ArrayBufferObject.cpp | 2204 +++++++++++ js/src/vm/ArrayBufferObject.h | 660 ++++ js/src/vm/ArrayBufferObjectMaybeShared.cpp | 76 + js/src/vm/ArrayBufferViewObject.cpp | 319 ++ js/src/vm/ArrayBufferViewObject.h | 166 + js/src/vm/ArrayObject-inl.h | 87 + js/src/vm/ArrayObject.h | 62 + js/src/vm/AsyncFunction.cpp | 349 ++ js/src/vm/AsyncFunction.h | 324 ++ js/src/vm/AsyncFunctionResolveKind.h | 18 + js/src/vm/AsyncIteration.cpp | 1484 +++++++ js/src/vm/AsyncIteration.h | 571 +++ js/src/vm/AtomsTable.h | 123 + js/src/vm/BigIntType.cpp | 3847 ++++++++++++++++++ js/src/vm/BigIntType.h | 481 +++ js/src/vm/BindingKind.h | 111 + js/src/vm/BooleanObject-inl.h | 28 + js/src/vm/BooleanObject.h | 44 + js/src/vm/BoundFunctionObject.cpp | 534 +++ js/src/vm/BoundFunctionObject.h | 174 + js/src/vm/BuildId.cpp | 27 + js/src/vm/BuiltinObjectKind.cpp | 205 + js/src/vm/BuiltinObjectKind.h | 88 + js/src/vm/BytecodeFormatFlags.h | 61 + js/src/vm/BytecodeIterator-inl.h | 40 + js/src/vm/BytecodeIterator.h | 85 + js/src/vm/BytecodeLocation-inl.h | 115 + js/src/vm/BytecodeLocation.cpp | 28 + js/src/vm/BytecodeLocation.h | 354 ++ js/src/vm/BytecodeUtil-inl.h | 242 ++ js/src/vm/BytecodeUtil.cpp | 3110 +++++++++++++++ js/src/vm/BytecodeUtil.h | 665 ++++ js/src/vm/Caches.h | 568 +++ js/src/vm/CallAndConstruct.cpp | 168 + js/src/vm/CallNonGenericMethod.cpp | 35 + js/src/vm/CharacterEncoding.cpp | 888 +++++ js/src/vm/CheckIsObjectKind.h | 24 + js/src/vm/CodeCoverage.cpp | 673 ++++ js/src/vm/CodeCoverage.h | 172 + 
js/src/vm/CommonPropertyNames.h | 619 +++ js/src/vm/Compartment-inl.h | 442 +++ js/src/vm/Compartment.cpp | 616 +++ js/src/vm/Compartment.h | 537 +++ js/src/vm/CompilationAndEvaluation.cpp | 613 +++ js/src/vm/CompletionKind.h | 16 + js/src/vm/Compression.cpp | 262 ++ js/src/vm/Compression.h | 115 + js/src/vm/DateObject.h | 101 + js/src/vm/DateTime.cpp | 824 ++++ js/src/vm/DateTime.h | 388 ++ js/src/vm/EnvironmentObject-inl.h | 87 + js/src/vm/EnvironmentObject.cpp | 4399 +++++++++++++++++++++ js/src/vm/EnvironmentObject.h | 1512 ++++++++ js/src/vm/EqualityOperations.cpp | 360 ++ js/src/vm/EqualityOperations.h | 72 + js/src/vm/ErrorMessages.cpp | 29 + js/src/vm/ErrorObject-inl.h | 39 + js/src/vm/ErrorObject.cpp | 814 ++++ js/src/vm/ErrorObject.h | 167 + js/src/vm/ErrorReporting.cpp | 585 +++ js/src/vm/ErrorReporting.h | 190 + js/src/vm/Exception.cpp | 60 + js/src/vm/ForOfIterator.cpp | 211 + js/src/vm/FrameIter-inl.h | 54 + js/src/vm/FrameIter.cpp | 1060 +++++ js/src/vm/FrameIter.h | 586 +++ js/src/vm/FunctionFlags.cpp | 13 + js/src/vm/FunctionFlags.h | 320 ++ js/src/vm/FunctionPrefixKind.h | 18 + js/src/vm/GeckoProfiler-inl.h | 141 + js/src/vm/GeckoProfiler.cpp | 561 +++ js/src/vm/GeckoProfiler.h | 255 ++ js/src/vm/GeneratorAndAsyncKind.h | 17 + js/src/vm/GeneratorObject.cpp | 508 +++ js/src/vm/GeneratorObject.h | 255 ++ js/src/vm/GeneratorResumeKind.h | 18 + js/src/vm/GetterSetter.cpp | 27 + js/src/vm/GetterSetter.h | 116 + js/src/vm/GlobalObject-inl.h | 26 + js/src/vm/GlobalObject.cpp | 1052 +++++ js/src/vm/GlobalObject.h | 1166 ++++++ js/src/vm/HelperThreadState.h | 823 ++++ js/src/vm/HelperThreadTask.h | 82 + js/src/vm/HelperThreads.cpp | 2745 +++++++++++++ js/src/vm/HelperThreads.h | 292 ++ js/src/vm/Id.cpp | 50 + js/src/vm/Initialization.cpp | 357 ++ js/src/vm/InlineCharBuffer-inl.h | 158 + js/src/vm/InternalThreadPool.cpp | 289 ++ js/src/vm/InternalThreadPool.h | 74 + js/src/vm/Interpreter-inl.h | 639 +++ js/src/vm/Interpreter.cpp | 5605 
+++++++++++++++++++++++++++ js/src/vm/Interpreter.h | 705 ++++ js/src/vm/IsGivenTypeObject-inl.h | 33 + js/src/vm/Iteration.cpp | 2168 +++++++++++ js/src/vm/Iteration.h | 794 ++++ js/src/vm/JSAtom-inl.h | 157 + js/src/vm/JSAtom.cpp | 1148 ++++++ js/src/vm/JSAtom.h | 113 + js/src/vm/JSAtomState.h | 63 + js/src/vm/JSContext-inl.h | 407 ++ js/src/vm/JSContext.cpp | 1386 +++++++ js/src/vm/JSContext.h | 1139 ++++++ js/src/vm/JSFunction-inl.h | 141 + js/src/vm/JSFunction.cpp | 1979 ++++++++++ js/src/vm/JSFunction.h | 875 +++++ js/src/vm/JSONParser.cpp | 1107 ++++++ js/src/vm/JSONParser.h | 517 +++ js/src/vm/JSONPrinter.cpp | 273 ++ js/src/vm/JSONPrinter.h | 93 + js/src/vm/JSObject-inl.h | 597 +++ js/src/vm/JSObject.cpp | 3649 +++++++++++++++++ js/src/vm/JSObject.h | 1099 ++++++ js/src/vm/JSScript-inl.h | 245 ++ js/src/vm/JSScript.cpp | 3779 ++++++++++++++++++ js/src/vm/JSScript.h | 2265 +++++++++++ js/src/vm/JitActivation.cpp | 261 ++ js/src/vm/JitActivation.h | 268 ++ js/src/vm/List-inl.h | 129 + js/src/vm/List.cpp | 11 + js/src/vm/List.h | 91 + js/src/vm/MallocProvider.h | 255 ++ js/src/vm/MatchPairs.h | 141 + js/src/vm/MemoryMetrics.cpp | 889 +++++ js/src/vm/ModuleBuilder.h | 118 + js/src/vm/Modules.cpp | 1830 +++++++++ js/src/vm/Modules.h | 45 + js/src/vm/Monitor.h | 72 + js/src/vm/MutexIDs.h | 81 + js/src/vm/NativeObject-inl.h | 908 +++++ js/src/vm/NativeObject.cpp | 2854 ++++++++++++++ js/src/vm/NativeObject.h | 1892 +++++++++ js/src/vm/NumberObject-inl.h | 28 + js/src/vm/NumberObject.h | 44 + js/src/vm/ObjectFlags-inl.h | 61 + js/src/vm/ObjectFlags.h | 77 + js/src/vm/ObjectOperations-inl.h | 388 ++ js/src/vm/ObjectOperations.h | 301 ++ js/src/vm/OffThreadPromiseRuntimeState.cpp | 299 ++ js/src/vm/OffThreadPromiseRuntimeState.h | 208 + js/src/vm/OffThreadScriptCompilation.cpp | 153 + js/src/vm/Opcodes.h | 3632 +++++++++++++++++ js/src/vm/PIC.cpp | 372 ++ js/src/vm/PIC.h | 246 ++ js/src/vm/PlainObject-inl.h | 94 + js/src/vm/PlainObject.cpp | 334 ++ 
js/src/vm/PlainObject.h | 111 + js/src/vm/Printer.cpp | 559 +++ js/src/vm/Probes-inl.h | 95 + js/src/vm/Probes.cpp | 64 + js/src/vm/Probes.h | 144 + js/src/vm/ProfilingStack.cpp | 53 + js/src/vm/PromiseLookup.cpp | 273 ++ js/src/vm/PromiseLookup.h | 163 + js/src/vm/PromiseObject.h | 250 ++ js/src/vm/PropMap-inl.h | 251 ++ js/src/vm/PropMap.cpp | 1233 ++++++ js/src/vm/PropMap.h | 1167 ++++++ js/src/vm/PropertyAndElement.cpp | 995 +++++ js/src/vm/PropertyDescriptor.cpp | 91 + js/src/vm/PropertyInfo.h | 221 ++ js/src/vm/PropertyKey.h | 60 + js/src/vm/PropertyResult.h | 103 + js/src/vm/ProxyObject.cpp | 206 + js/src/vm/ProxyObject.h | 165 + js/src/vm/Realm-inl.h | 110 + js/src/vm/Realm.cpp | 774 ++++ js/src/vm/Realm.h | 886 +++++ js/src/vm/RecordTupleShared.cpp | 133 + js/src/vm/RecordTupleShared.h | 32 + js/src/vm/RecordType.cpp | 538 +++ js/src/vm/RecordType.h | 78 + js/src/vm/RegExpObject.cpp | 1232 ++++++ js/src/vm/RegExpObject.h | 223 ++ js/src/vm/RegExpShared.h | 449 +++ js/src/vm/RegExpStatics.cpp | 61 + js/src/vm/RegExpStatics.h | 307 ++ js/src/vm/Runtime.cpp | 847 ++++ js/src/vm/Runtime.h | 1144 ++++++ js/src/vm/SavedFrame.h | 297 ++ js/src/vm/SavedStacks-inl.h | 29 + js/src/vm/SavedStacks.cpp | 2097 ++++++++++ js/src/vm/SavedStacks.h | 342 ++ js/src/vm/Scope.cpp | 1728 +++++++++ js/src/vm/Scope.h | 1891 +++++++++ js/src/vm/ScopeKind.h | 53 + js/src/vm/SelfHosting.cpp | 2784 +++++++++++++ js/src/vm/SelfHosting.h | 287 ++ js/src/vm/Shape-inl.h | 105 + js/src/vm/Shape.cpp | 1484 +++++++ js/src/vm/Shape.h | 925 +++++ js/src/vm/ShapeZone.cpp | 125 + js/src/vm/ShapeZone.h | 244 ++ js/src/vm/SharedArrayObject.cpp | 588 +++ js/src/vm/SharedArrayObject.h | 327 ++ js/src/vm/SharedImmutableStringsCache-inl.h | 75 + js/src/vm/SharedImmutableStringsCache.cpp | 147 + js/src/vm/SharedImmutableStringsCache.h | 425 ++ js/src/vm/SharedMem.h | 208 + js/src/vm/SharedScriptDataTableHolder.cpp | 19 + js/src/vm/SharedScriptDataTableHolder.h | 88 + js/src/vm/SharedStencil.h | 849 
++++ js/src/vm/SourceHook.cpp | 26 + js/src/vm/Stack-inl.h | 859 ++++ js/src/vm/Stack.cpp | 766 ++++ js/src/vm/Stack.h | 999 +++++ js/src/vm/StaticStrings.cpp | 89 + js/src/vm/StaticStrings.h | 276 ++ js/src/vm/StencilCache.cpp | 67 + js/src/vm/StencilCache.h | 181 + js/src/vm/StencilEnums.h | 346 ++ js/src/vm/StencilObject.cpp | 147 + js/src/vm/StencilObject.h | 71 + js/src/vm/StringObject-inl.h | 51 + js/src/vm/StringObject.h | 72 + js/src/vm/StringType-inl.h | 526 +++ js/src/vm/StringType.cpp | 2276 +++++++++++ js/src/vm/StringType.h | 2052 ++++++++++ js/src/vm/StructuredClone.cpp | 4123 ++++++++++++++++++++ js/src/vm/SymbolType.cpp | 146 + js/src/vm/SymbolType.h | 153 + js/src/vm/TaggedProto.cpp | 34 + js/src/vm/TaggedProto.h | 173 + js/src/vm/ThrowMsgKind.cpp | 36 + js/src/vm/ThrowMsgKind.h | 37 + js/src/vm/Time.cpp | 383 ++ js/src/vm/Time.h | 176 + js/src/vm/ToSource.cpp | 249 ++ js/src/vm/ToSource.h | 26 + js/src/vm/TupleType.cpp | 639 +++ js/src/vm/TupleType.h | 87 + js/src/vm/TypedArrayObject-inl.h | 769 ++++ js/src/vm/TypedArrayObject.cpp | 2998 ++++++++++++++ js/src/vm/TypedArrayObject.h | 301 ++ js/src/vm/UbiNode.cpp | 527 +++ js/src/vm/UbiNodeCensus.cpp | 1323 +++++++ js/src/vm/UbiNodeShortestPaths.cpp | 105 + js/src/vm/Uint8Clamped.h | 121 + js/src/vm/UsageStatistics.cpp | 20 + js/src/vm/Value.cpp | 41 + js/src/vm/Warnings.cpp | 105 + js/src/vm/Warnings.h | 27 + js/src/vm/Watchtower.cpp | 296 ++ js/src/vm/Watchtower.h | 120 + js/src/vm/WellKnownAtom.cpp | 45 + js/src/vm/WellKnownAtom.h | 67 + js/src/vm/WindowProxy.cpp | 70 + js/src/vm/WrapperObject.h | 40 + js/src/vm/Xdr.cpp | 167 + js/src/vm/Xdr.h | 457 +++ js/src/vm/jsopcode.py | 382 ++ js/src/vm/make_opcode_doc.py | 195 + 259 files changed, 144450 insertions(+) create mode 100644 js/src/vm/Activation-inl.h create mode 100644 js/src/vm/Activation.cpp create mode 100644 js/src/vm/Activation.h create mode 100644 js/src/vm/ArgumentsObject-inl.h create mode 100644 js/src/vm/ArgumentsObject.cpp create 
mode 100644 js/src/vm/ArgumentsObject.h create mode 100644 js/src/vm/ArrayBufferObject-inl.h create mode 100644 js/src/vm/ArrayBufferObject.cpp create mode 100644 js/src/vm/ArrayBufferObject.h create mode 100644 js/src/vm/ArrayBufferObjectMaybeShared.cpp create mode 100644 js/src/vm/ArrayBufferViewObject.cpp create mode 100644 js/src/vm/ArrayBufferViewObject.h create mode 100644 js/src/vm/ArrayObject-inl.h create mode 100644 js/src/vm/ArrayObject.h create mode 100644 js/src/vm/AsyncFunction.cpp create mode 100644 js/src/vm/AsyncFunction.h create mode 100644 js/src/vm/AsyncFunctionResolveKind.h create mode 100644 js/src/vm/AsyncIteration.cpp create mode 100644 js/src/vm/AsyncIteration.h create mode 100644 js/src/vm/AtomsTable.h create mode 100644 js/src/vm/BigIntType.cpp create mode 100644 js/src/vm/BigIntType.h create mode 100644 js/src/vm/BindingKind.h create mode 100644 js/src/vm/BooleanObject-inl.h create mode 100644 js/src/vm/BooleanObject.h create mode 100644 js/src/vm/BoundFunctionObject.cpp create mode 100644 js/src/vm/BoundFunctionObject.h create mode 100644 js/src/vm/BuildId.cpp create mode 100644 js/src/vm/BuiltinObjectKind.cpp create mode 100644 js/src/vm/BuiltinObjectKind.h create mode 100644 js/src/vm/BytecodeFormatFlags.h create mode 100644 js/src/vm/BytecodeIterator-inl.h create mode 100644 js/src/vm/BytecodeIterator.h create mode 100644 js/src/vm/BytecodeLocation-inl.h create mode 100644 js/src/vm/BytecodeLocation.cpp create mode 100644 js/src/vm/BytecodeLocation.h create mode 100644 js/src/vm/BytecodeUtil-inl.h create mode 100644 js/src/vm/BytecodeUtil.cpp create mode 100644 js/src/vm/BytecodeUtil.h create mode 100644 js/src/vm/Caches.h create mode 100644 js/src/vm/CallAndConstruct.cpp create mode 100644 js/src/vm/CallNonGenericMethod.cpp create mode 100644 js/src/vm/CharacterEncoding.cpp create mode 100644 js/src/vm/CheckIsObjectKind.h create mode 100644 js/src/vm/CodeCoverage.cpp create mode 100644 js/src/vm/CodeCoverage.h create mode 100644 
js/src/vm/CommonPropertyNames.h create mode 100644 js/src/vm/Compartment-inl.h create mode 100644 js/src/vm/Compartment.cpp create mode 100644 js/src/vm/Compartment.h create mode 100644 js/src/vm/CompilationAndEvaluation.cpp create mode 100644 js/src/vm/CompletionKind.h create mode 100644 js/src/vm/Compression.cpp create mode 100644 js/src/vm/Compression.h create mode 100644 js/src/vm/DateObject.h create mode 100644 js/src/vm/DateTime.cpp create mode 100644 js/src/vm/DateTime.h create mode 100644 js/src/vm/EnvironmentObject-inl.h create mode 100644 js/src/vm/EnvironmentObject.cpp create mode 100644 js/src/vm/EnvironmentObject.h create mode 100644 js/src/vm/EqualityOperations.cpp create mode 100644 js/src/vm/EqualityOperations.h create mode 100644 js/src/vm/ErrorMessages.cpp create mode 100644 js/src/vm/ErrorObject-inl.h create mode 100644 js/src/vm/ErrorObject.cpp create mode 100644 js/src/vm/ErrorObject.h create mode 100644 js/src/vm/ErrorReporting.cpp create mode 100644 js/src/vm/ErrorReporting.h create mode 100644 js/src/vm/Exception.cpp create mode 100644 js/src/vm/ForOfIterator.cpp create mode 100644 js/src/vm/FrameIter-inl.h create mode 100644 js/src/vm/FrameIter.cpp create mode 100644 js/src/vm/FrameIter.h create mode 100644 js/src/vm/FunctionFlags.cpp create mode 100644 js/src/vm/FunctionFlags.h create mode 100644 js/src/vm/FunctionPrefixKind.h create mode 100644 js/src/vm/GeckoProfiler-inl.h create mode 100644 js/src/vm/GeckoProfiler.cpp create mode 100644 js/src/vm/GeckoProfiler.h create mode 100644 js/src/vm/GeneratorAndAsyncKind.h create mode 100644 js/src/vm/GeneratorObject.cpp create mode 100644 js/src/vm/GeneratorObject.h create mode 100644 js/src/vm/GeneratorResumeKind.h create mode 100644 js/src/vm/GetterSetter.cpp create mode 100644 js/src/vm/GetterSetter.h create mode 100644 js/src/vm/GlobalObject-inl.h create mode 100644 js/src/vm/GlobalObject.cpp create mode 100644 js/src/vm/GlobalObject.h create mode 100644 js/src/vm/HelperThreadState.h create 
mode 100644 js/src/vm/HelperThreadTask.h create mode 100644 js/src/vm/HelperThreads.cpp create mode 100644 js/src/vm/HelperThreads.h create mode 100644 js/src/vm/Id.cpp create mode 100644 js/src/vm/Initialization.cpp create mode 100644 js/src/vm/InlineCharBuffer-inl.h create mode 100644 js/src/vm/InternalThreadPool.cpp create mode 100644 js/src/vm/InternalThreadPool.h create mode 100644 js/src/vm/Interpreter-inl.h create mode 100644 js/src/vm/Interpreter.cpp create mode 100644 js/src/vm/Interpreter.h create mode 100644 js/src/vm/IsGivenTypeObject-inl.h create mode 100644 js/src/vm/Iteration.cpp create mode 100644 js/src/vm/Iteration.h create mode 100644 js/src/vm/JSAtom-inl.h create mode 100644 js/src/vm/JSAtom.cpp create mode 100644 js/src/vm/JSAtom.h create mode 100644 js/src/vm/JSAtomState.h create mode 100644 js/src/vm/JSContext-inl.h create mode 100644 js/src/vm/JSContext.cpp create mode 100644 js/src/vm/JSContext.h create mode 100644 js/src/vm/JSFunction-inl.h create mode 100644 js/src/vm/JSFunction.cpp create mode 100644 js/src/vm/JSFunction.h create mode 100644 js/src/vm/JSONParser.cpp create mode 100644 js/src/vm/JSONParser.h create mode 100644 js/src/vm/JSONPrinter.cpp create mode 100644 js/src/vm/JSONPrinter.h create mode 100644 js/src/vm/JSObject-inl.h create mode 100644 js/src/vm/JSObject.cpp create mode 100644 js/src/vm/JSObject.h create mode 100644 js/src/vm/JSScript-inl.h create mode 100644 js/src/vm/JSScript.cpp create mode 100644 js/src/vm/JSScript.h create mode 100644 js/src/vm/JitActivation.cpp create mode 100644 js/src/vm/JitActivation.h create mode 100644 js/src/vm/List-inl.h create mode 100644 js/src/vm/List.cpp create mode 100644 js/src/vm/List.h create mode 100644 js/src/vm/MallocProvider.h create mode 100644 js/src/vm/MatchPairs.h create mode 100644 js/src/vm/MemoryMetrics.cpp create mode 100644 js/src/vm/ModuleBuilder.h create mode 100644 js/src/vm/Modules.cpp create mode 100644 js/src/vm/Modules.h create mode 100644 js/src/vm/Monitor.h 
create mode 100644 js/src/vm/MutexIDs.h create mode 100644 js/src/vm/NativeObject-inl.h create mode 100644 js/src/vm/NativeObject.cpp create mode 100644 js/src/vm/NativeObject.h create mode 100644 js/src/vm/NumberObject-inl.h create mode 100644 js/src/vm/NumberObject.h create mode 100644 js/src/vm/ObjectFlags-inl.h create mode 100644 js/src/vm/ObjectFlags.h create mode 100644 js/src/vm/ObjectOperations-inl.h create mode 100644 js/src/vm/ObjectOperations.h create mode 100644 js/src/vm/OffThreadPromiseRuntimeState.cpp create mode 100644 js/src/vm/OffThreadPromiseRuntimeState.h create mode 100644 js/src/vm/OffThreadScriptCompilation.cpp create mode 100644 js/src/vm/Opcodes.h create mode 100644 js/src/vm/PIC.cpp create mode 100644 js/src/vm/PIC.h create mode 100644 js/src/vm/PlainObject-inl.h create mode 100644 js/src/vm/PlainObject.cpp create mode 100644 js/src/vm/PlainObject.h create mode 100644 js/src/vm/Printer.cpp create mode 100644 js/src/vm/Probes-inl.h create mode 100644 js/src/vm/Probes.cpp create mode 100644 js/src/vm/Probes.h create mode 100644 js/src/vm/ProfilingStack.cpp create mode 100644 js/src/vm/PromiseLookup.cpp create mode 100644 js/src/vm/PromiseLookup.h create mode 100644 js/src/vm/PromiseObject.h create mode 100644 js/src/vm/PropMap-inl.h create mode 100644 js/src/vm/PropMap.cpp create mode 100644 js/src/vm/PropMap.h create mode 100644 js/src/vm/PropertyAndElement.cpp create mode 100644 js/src/vm/PropertyDescriptor.cpp create mode 100644 js/src/vm/PropertyInfo.h create mode 100644 js/src/vm/PropertyKey.h create mode 100644 js/src/vm/PropertyResult.h create mode 100644 js/src/vm/ProxyObject.cpp create mode 100644 js/src/vm/ProxyObject.h create mode 100644 js/src/vm/Realm-inl.h create mode 100644 js/src/vm/Realm.cpp create mode 100644 js/src/vm/Realm.h create mode 100644 js/src/vm/RecordTupleShared.cpp create mode 100644 js/src/vm/RecordTupleShared.h create mode 100644 js/src/vm/RecordType.cpp create mode 100644 js/src/vm/RecordType.h create mode 
100644 js/src/vm/RegExpObject.cpp create mode 100644 js/src/vm/RegExpObject.h create mode 100644 js/src/vm/RegExpShared.h create mode 100644 js/src/vm/RegExpStatics.cpp create mode 100644 js/src/vm/RegExpStatics.h create mode 100644 js/src/vm/Runtime.cpp create mode 100644 js/src/vm/Runtime.h create mode 100644 js/src/vm/SavedFrame.h create mode 100644 js/src/vm/SavedStacks-inl.h create mode 100644 js/src/vm/SavedStacks.cpp create mode 100644 js/src/vm/SavedStacks.h create mode 100644 js/src/vm/Scope.cpp create mode 100644 js/src/vm/Scope.h create mode 100644 js/src/vm/ScopeKind.h create mode 100644 js/src/vm/SelfHosting.cpp create mode 100644 js/src/vm/SelfHosting.h create mode 100644 js/src/vm/Shape-inl.h create mode 100644 js/src/vm/Shape.cpp create mode 100644 js/src/vm/Shape.h create mode 100644 js/src/vm/ShapeZone.cpp create mode 100644 js/src/vm/ShapeZone.h create mode 100644 js/src/vm/SharedArrayObject.cpp create mode 100644 js/src/vm/SharedArrayObject.h create mode 100644 js/src/vm/SharedImmutableStringsCache-inl.h create mode 100644 js/src/vm/SharedImmutableStringsCache.cpp create mode 100644 js/src/vm/SharedImmutableStringsCache.h create mode 100644 js/src/vm/SharedMem.h create mode 100644 js/src/vm/SharedScriptDataTableHolder.cpp create mode 100644 js/src/vm/SharedScriptDataTableHolder.h create mode 100644 js/src/vm/SharedStencil.h create mode 100644 js/src/vm/SourceHook.cpp create mode 100644 js/src/vm/Stack-inl.h create mode 100644 js/src/vm/Stack.cpp create mode 100644 js/src/vm/Stack.h create mode 100644 js/src/vm/StaticStrings.cpp create mode 100644 js/src/vm/StaticStrings.h create mode 100644 js/src/vm/StencilCache.cpp create mode 100644 js/src/vm/StencilCache.h create mode 100644 js/src/vm/StencilEnums.h create mode 100644 js/src/vm/StencilObject.cpp create mode 100644 js/src/vm/StencilObject.h create mode 100644 js/src/vm/StringObject-inl.h create mode 100644 js/src/vm/StringObject.h create mode 100644 js/src/vm/StringType-inl.h create mode 
100644 js/src/vm/StringType.cpp create mode 100644 js/src/vm/StringType.h create mode 100644 js/src/vm/StructuredClone.cpp create mode 100644 js/src/vm/SymbolType.cpp create mode 100644 js/src/vm/SymbolType.h create mode 100644 js/src/vm/TaggedProto.cpp create mode 100644 js/src/vm/TaggedProto.h create mode 100644 js/src/vm/ThrowMsgKind.cpp create mode 100644 js/src/vm/ThrowMsgKind.h create mode 100644 js/src/vm/Time.cpp create mode 100644 js/src/vm/Time.h create mode 100644 js/src/vm/ToSource.cpp create mode 100644 js/src/vm/ToSource.h create mode 100644 js/src/vm/TupleType.cpp create mode 100644 js/src/vm/TupleType.h create mode 100644 js/src/vm/TypedArrayObject-inl.h create mode 100644 js/src/vm/TypedArrayObject.cpp create mode 100644 js/src/vm/TypedArrayObject.h create mode 100644 js/src/vm/UbiNode.cpp create mode 100644 js/src/vm/UbiNodeCensus.cpp create mode 100644 js/src/vm/UbiNodeShortestPaths.cpp create mode 100644 js/src/vm/Uint8Clamped.h create mode 100644 js/src/vm/UsageStatistics.cpp create mode 100644 js/src/vm/Value.cpp create mode 100644 js/src/vm/Warnings.cpp create mode 100644 js/src/vm/Warnings.h create mode 100644 js/src/vm/Watchtower.cpp create mode 100644 js/src/vm/Watchtower.h create mode 100644 js/src/vm/WellKnownAtom.cpp create mode 100644 js/src/vm/WellKnownAtom.h create mode 100644 js/src/vm/WindowProxy.cpp create mode 100644 js/src/vm/WrapperObject.h create mode 100644 js/src/vm/Xdr.cpp create mode 100644 js/src/vm/Xdr.h create mode 100644 js/src/vm/jsopcode.py create mode 100755 js/src/vm/make_opcode_doc.py (limited to 'js/src/vm') diff --git a/js/src/vm/Activation-inl.h b/js/src/vm/Activation-inl.h new file mode 100644 index 0000000000..1ee1439beb --- /dev/null +++ b/js/src/vm/Activation-inl.h @@ -0,0 +1,172 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_Activation_inl_h +#define vm_Activation_inl_h + +#include "vm/Activation.h" + +#include "mozilla/Assertions.h" // MOZ_ASSERT{,_IF}, MOZ_CRASH +#include "mozilla/Likely.h" // MOZ_UNLIKELY +#include "mozilla/Maybe.h" // mozilla::Maybe + +#include "jit/CalleeToken.h" // js::jit::CalleeToken +#include "js/Debug.h" // JS::dbg::AutoEntryMonitor +#include "vm/FrameIter.h" // js::FrameIter +#include "vm/JitActivation.h" // js::jit::JitActivation +#include "vm/JSContext.h" // JSContext +#include "vm/Stack.h" // js::AbstractFramePtr + +namespace js { + +inline ActivationEntryMonitor::ActivationEntryMonitor(JSContext* cx) + : cx_(cx), entryMonitor_(cx->entryMonitor) { + cx->entryMonitor = nullptr; +} + +inline ActivationEntryMonitor::ActivationEntryMonitor( + JSContext* cx, InterpreterFrame* entryFrame) + : ActivationEntryMonitor(cx) { + if (MOZ_UNLIKELY(entryMonitor_)) { + init(cx, entryFrame); + } +} + +inline ActivationEntryMonitor::ActivationEntryMonitor( + JSContext* cx, jit::CalleeToken entryToken) + : ActivationEntryMonitor(cx) { + if (MOZ_UNLIKELY(entryMonitor_)) { + init(cx, entryToken); + } +} + +inline ActivationEntryMonitor::~ActivationEntryMonitor() { + if (entryMonitor_) { + entryMonitor_->Exit(cx_); + } + + cx_->entryMonitor = entryMonitor_; +} + +inline Activation::Activation(JSContext* cx, Kind kind) + : cx_(cx), + compartment_(cx->compartment()), + prev_(cx->activation_), + prevProfiling_(prev_ ? 
prev_->mostRecentProfiling() : nullptr), + hideScriptedCallerCount_(0), + frameCache_(cx), + asyncStack_(cx, cx->asyncStackForNewActivations()), + asyncCause_(cx->asyncCauseForNewActivations), + asyncCallIsExplicit_(cx->asyncCallIsExplicit), + kind_(kind) { + cx->asyncStackForNewActivations() = nullptr; + cx->asyncCauseForNewActivations = nullptr; + cx->asyncCallIsExplicit = false; + cx->activation_ = this; +} + +inline Activation::~Activation() { + MOZ_ASSERT_IF(isProfiling(), this != cx_->profilingActivation_); + MOZ_ASSERT(cx_->activation_ == this); + MOZ_ASSERT(hideScriptedCallerCount_ == 0); + cx_->activation_ = prev_; + cx_->asyncCauseForNewActivations = asyncCause_; + cx_->asyncStackForNewActivations() = asyncStack_; + cx_->asyncCallIsExplicit = asyncCallIsExplicit_; +} + +inline bool Activation::isProfiling() const { + if (isInterpreter()) { + return asInterpreter()->isProfiling(); + } + + MOZ_ASSERT(isJit()); + return asJit()->isProfiling(); +} + +inline Activation* Activation::mostRecentProfiling() { + if (isProfiling()) { + return this; + } + return prevProfiling_; +} + +inline LiveSavedFrameCache* Activation::getLiveSavedFrameCache(JSContext* cx) { + if (!frameCache_.get().initialized() && !frameCache_.get().init(cx)) { + return nullptr; + } + return frameCache_.address(); +} + +/* static */ inline mozilla::Maybe +LiveSavedFrameCache::FramePtr::create(const FrameIter& iter) { + if (iter.done()) { + return mozilla::Nothing(); + } + + if (iter.isPhysicalJitFrame()) { + return mozilla::Some(FramePtr(iter.physicalJitFrame())); + } + + if (!iter.hasUsableAbstractFramePtr()) { + return mozilla::Nothing(); + } + + auto afp = iter.abstractFramePtr(); + + if (afp.isInterpreterFrame()) { + return mozilla::Some(FramePtr(afp.asInterpreterFrame())); + } + if (afp.isWasmDebugFrame()) { + return mozilla::Some(FramePtr(afp.asWasmDebugFrame())); + } + if (afp.isRematerializedFrame()) { + return mozilla::Some(FramePtr(afp.asRematerializedFrame())); + } + + 
MOZ_CRASH("unexpected frame type"); +} + +struct LiveSavedFrameCache::FramePtr::HasCachedMatcher { + template + bool operator()(Frame* f) const { + return f->hasCachedSavedFrame(); + } +}; + +inline bool LiveSavedFrameCache::FramePtr::hasCachedSavedFrame() const { + return ptr.match(HasCachedMatcher()); +} + +struct LiveSavedFrameCache::FramePtr::SetHasCachedMatcher { + template + void operator()(Frame* f) { + f->setHasCachedSavedFrame(); + } +}; + +inline void LiveSavedFrameCache::FramePtr::setHasCachedSavedFrame() { + ptr.match(SetHasCachedMatcher()); +} + +struct LiveSavedFrameCache::FramePtr::ClearHasCachedMatcher { + template + void operator()(Frame* f) { + f->clearHasCachedSavedFrame(); + } +}; + +inline void LiveSavedFrameCache::FramePtr::clearHasCachedSavedFrame() { + ptr.match(ClearHasCachedMatcher()); +} + +inline bool Activation::hasWasmExitFP() const { + return isJit() && asJit()->hasWasmExitFP(); +} + +} // namespace js + +#endif // vm_Activation_inl_h diff --git a/js/src/vm/Activation.cpp b/js/src/vm/Activation.cpp new file mode 100644 index 0000000000..19b383c9c5 --- /dev/null +++ b/js/src/vm/Activation.cpp @@ -0,0 +1,84 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/Activation-inl.h" + +#include "mozilla/Assertions.h" // MOZ_ASSERT + +#include "gc/GC.h" // js::gc::AutoSuppressGC +#include "jit/CalleeToken.h" // js::jit::CalleeToken{IsFunction,To{Function,Script}} +#include "js/RootingAPI.h" // JS::Rooted +#include "js/Value.h" // JS::Value +#include "vm/JSContext.h" // JSContext, js::TlsContext +#include "vm/Stack.h" // js::InterpreterFrame + +#include "vm/Compartment-inl.h" // JS::Compartment::wrap + +using namespace js; + +using JS::ObjectOrNullValue; +using JS::Rooted; +using JS::UndefinedValue; +using JS::Value; + +Value ActivationEntryMonitor::asyncStack(JSContext* cx) { + Rooted stack(cx, ObjectOrNullValue(cx->asyncStackForNewActivations())); + if (!cx->compartment()->wrap(cx, &stack)) { + cx->clearPendingException(); + return UndefinedValue(); + } + return stack; +} + +void ActivationEntryMonitor::init(JSContext* cx, InterpreterFrame* entryFrame) { + // The InterpreterFrame is not yet part of an Activation, so it won't + // be traced if we trigger GC here. Suppress GC to avoid this. + gc::AutoSuppressGC suppressGC(cx); + Rooted stack(cx, asyncStack(cx)); + const char* asyncCause = cx->asyncCauseForNewActivations; + if (entryFrame->isFunctionFrame()) { + entryMonitor_->Entry(cx, &entryFrame->callee(), stack, asyncCause); + } else { + entryMonitor_->Entry(cx, entryFrame->script(), stack, asyncCause); + } +} + +void ActivationEntryMonitor::init(JSContext* cx, jit::CalleeToken entryToken) { + // The CalleeToken is not traced at this point and we also don't want + // a GC to discard the code we're about to enter, so we suppress GC. 
+ gc::AutoSuppressGC suppressGC(cx); + RootedValue stack(cx, asyncStack(cx)); + const char* asyncCause = cx->asyncCauseForNewActivations; + if (jit::CalleeTokenIsFunction(entryToken)) { + entryMonitor_->Entry(cx_, jit::CalleeTokenToFunction(entryToken), stack, + asyncCause); + } else { + entryMonitor_->Entry(cx_, jit::CalleeTokenToScript(entryToken), stack, + asyncCause); + } +} + +void Activation::registerProfiling() { + MOZ_ASSERT(isProfiling()); + cx_->profilingActivation_ = this; +} + +void Activation::unregisterProfiling() { + MOZ_ASSERT(isProfiling()); + MOZ_ASSERT(cx_->profilingActivation_ == this); + cx_->profilingActivation_ = prevProfiling_; +} + +ActivationIterator::ActivationIterator(JSContext* cx) + : activation_(cx->activation_) { + MOZ_ASSERT(cx == TlsContext.get()); +} + +ActivationIterator& ActivationIterator::operator++() { + MOZ_ASSERT(activation_); + activation_ = activation_->prev(); + return *this; +} diff --git a/js/src/vm/Activation.h b/js/src/vm/Activation.h new file mode 100644 index 0000000000..4153e27478 --- /dev/null +++ b/js/src/vm/Activation.h @@ -0,0 +1,565 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_Activation_h +#define vm_Activation_h + +#include "mozilla/Assertions.h" // MOZ_ASSERT +#include "mozilla/Attributes.h" // MOZ_RAII + +#include // size_t +#include // uint8_t, uint32_t + +#include "jstypes.h" // JS_PUBLIC_API + +#include "jit/CalleeToken.h" // js::jit::CalleeToken +#include "js/RootingAPI.h" // JS::Handle, JS::Rooted +#include "js/TypeDecls.h" // jsbytecode +#include "js/Value.h" // JS::Value +#include "vm/SavedFrame.h" // js::SavedFrame +#include "vm/Stack.h" // js::InterpreterRegs + +struct JS_PUBLIC_API JSContext; + +class JSFunction; +class JSObject; +class JSScript; + +namespace JS { + +class CallArgs; +class JS_PUBLIC_API Compartment; + +namespace dbg { +class JS_PUBLIC_API AutoEntryMonitor; +} // namespace dbg + +} // namespace JS + +namespace js { + +class InterpreterActivation; + +namespace jit { +class JitActivation; +} // namespace jit + +// This class is separate from Activation, because it calls Compartment::wrap() +// which can GC and walk the stack. It's not safe to do that within the +// JitActivation constructor. +class MOZ_RAII ActivationEntryMonitor { + JSContext* cx_; + + // The entry point monitor that was set on cx_->runtime() when this + // ActivationEntryMonitor was created. 
+ JS::dbg::AutoEntryMonitor* entryMonitor_; + + explicit inline ActivationEntryMonitor(JSContext* cx); + + ActivationEntryMonitor(const ActivationEntryMonitor& other) = delete; + void operator=(const ActivationEntryMonitor& other) = delete; + + void init(JSContext* cx, jit::CalleeToken entryToken); + void init(JSContext* cx, InterpreterFrame* entryFrame); + + JS::Value asyncStack(JSContext* cx); + + public: + inline ActivationEntryMonitor(JSContext* cx, InterpreterFrame* entryFrame); + inline ActivationEntryMonitor(JSContext* cx, jit::CalleeToken entryToken); + inline ~ActivationEntryMonitor(); +}; + +// [SMDOC] LiveSavedFrameCache: SavedFrame caching to minimize stack walking +// +// Since each SavedFrame object includes a 'parent' pointer to the SavedFrame +// for its caller, if we could easily find the right SavedFrame for a given +// stack frame, we wouldn't need to walk the rest of the stack. Traversing deep +// stacks can be expensive, and when we're profiling or instrumenting code, we +// may want to capture JavaScript stacks frequently, so such cases would benefit +// if we could avoid walking the entire stack. +// +// We could have a cache mapping frame addresses to their SavedFrame objects, +// but invalidating its entries would be a challenge. Popping a stack frame is +// extremely performance-sensitive, and SpiderMonkey stack frames can be OSR'd, +// thrown, rematerialized, and perhaps meet other fates; we would rather our +// cache not depend on handling so many tricky cases. +// +// It turns out that we can keep the cache accurate by reserving a single bit in +// the stack frame, which must be clear on any newly pushed frame. When we +// insert an entry into the cache mapping a given frame address to its +// SavedFrame, we set the bit in the frame. 
Then, we take care to probe the +// cache only for frames whose bit is set; the bit tells us that the frame has +// never left the stack, so its cache entry must be accurate, at least about +// which function the frame is executing (the line may have changed; more about +// that below). The code refers to this bit as the 'hasCachedSavedFrame' flag. +// +// We could manage such a cache replacing least-recently used entries, but we +// can do better than that: the cache can be a stack, of which we need examine +// only entries from the top. +// +// First, observe that stacks are walked from the youngest frame to the oldest, +// but SavedFrame chains are built from oldest to youngest, to ensure common +// tails are shared. This means that capturing a stack is necessarily a +// two-phase process: walk the stack, and then build the SavedFrames. +// +// Naturally, the first time we capture the stack, the cache is empty, and we +// must traverse the entire stack. As we build each SavedFrame, we push an entry +// associating the frame's address to its SavedFrame on the cache, and set the +// frame's bit. At the end, every frame has its bit set and an entry in the +// cache. +// +// Then the program runs some more. Some, none, or all of the frames are popped. +// Any new frames are pushed with their bit clear. Any frame with its bit set +// has never left the stack. The cache is left untouched. +// +// For the next capture, we walk the stack up to the first frame with its bit +// set, if there is one. Call it F; it must have a cache entry. We pop entries +// from the cache - all invalid, because they are above F's entry, and hence +// younger - until we find the entry matching F's address. Since F's bit is set, +// we know it never left the stack, and hence that no younger frame could have +// had a colliding address. And since the frame's bit was set when we pushed the +// cache entry, we know the entry is still valid. 
+// +// F's cache entry's SavedFrame covers the rest of the stack, so we don't need +// to walk the stack any further. Now we begin building SavedFrame objects for +// the new frames, pushing cache entries, and setting bits on the frames. By the +// end, the cache again covers the full stack, and every frame's bit is set. +// +// If we walk the stack to the end, and find no frame with its bit set, then the +// entire cache is invalid. At this point, it must be emptied, so that the new +// entries we are about to push are the only frames in the cache. +// +// For example, suppose we have the following stack (let 'A > B' mean "A called +// B", so the frames are listed oldest first): +// +// P > Q > R > S Initial stack, bits not set. +// P* > Q* > R* > S* Capture a SavedFrame stack, set bits. +// The cache now holds: P > Q > R > S. +// P* > Q* > R* Return from S. +// P* > Q* Return from R. +// P* > Q* > T > U Call T and U. New frames have clear bits. +// +// If we capture the stack now, the cache still holds: +// +// P > Q > R > S +// +// As we traverse the stack, we'll cross U and T, and then find Q with its bit +// set. We pop entries from the cache until we find the entry for Q; this +// removes entries R and S, which were indeed invalid. In Q's cache entry, we +// find the SavedFrame representing the stack P > Q. Now we build SavedFrames +// for the new portion of the stack, pushing an entry for T and setting the bit +// on the frame, and then doing the same for U. In the end, the call stack again +// has bits set on all its frames: +// +// P* > Q* > T* > U* All frames are now in the cache. +// +// And the cache again holds entries for the entire stack: +// +// P > Q > T > U +// +// Details: +// +// - When we find a cache entry whose frame address matches our frame F, we know +// that F has never left the stack, but it may certainly be the case that +// execution took place in that frame, and that the current source position +// within F's function has changed. 
This means that the entry's SavedFrame, +// which records the source line and column as well as the function, is not +// correct. To detect this case, when we push a cache entry, we record the +// frame's pc. When consulting the cache, if a frame's address matches but its +// pc does not, then we pop the cache entry, clear the frame's bit, and +// continue walking the stack. The next stack frame will definitely hit: since +// its callee frame never left the stack, the calling frame never got the +// chance to execute. +// +// - Generators, at least conceptually, have long-lived stack frames that +// disappear from the stack when the generator yields, and reappear on the +// stack when the generator's 'next' method is called. When a generator's +// frame is placed again atop the stack, its bit must be cleared - for the +// purposes of the cache, treating the frame as a new frame - to respect the +// invariants we used to justify the algorithm above. Async function +// activations usually appear atop empty stacks, since they are invoked as a +// promise callback, but the same rule applies. +// +// - SpiderMonkey has many types of stack frames, and not all have a place to +// store a bit indicating a cached SavedFrame. But as long as we don't create +// cache entries for frames we can't mark, simply omitting them from the cache +// is harmless. Uncacheable frame types include inlined Ion frames and +// non-Debug wasm frames. The LiveSavedFrameCache::FramePtr type represents +// only pointers to frames that can be cached, so if you have a FramePtr, you +// don't need to further check the frame for cachability. FramePtr provides +// access to the hasCachedSavedFrame bit. +// +// - We actually break up the cache into one cache per Activation. Popping an +// activation invalidates all its cache entries, simply by freeing the cache +// altogether. 
+// +// - The entire chain of SavedFrames for a given stack capture is created in the +// compartment of the code that requested the capture, *not* in that of the +// frames it represents, so in general, different compartments may have +// different SavedFrame objects representing the same actual stack frame. The +// LiveSavedFrameCache simply records whichever SavedFrames were used in the +// most recent captures. When we find a cache hit, we check the entry's +// SavedFrame's compartment against the current compartment; if they do not +// match, we clear the entire cache. +// +// This means that it is not always true that, if a frame's +// hasCachedSavedFrame bit is set, it must have an entry in the cache. The +// actual invariant is: either the cache is completely empty, or the frames' +// bits are trustworthy. This invariant holds even though capture can be +// interrupted at many places by OOM failures. Clearing the cache is a single, +// uninterruptible step. When we try to look up a frame whose bit is set and +// find an empty cache, we clear the frame's bit. And we only add the first +// frame to an empty cache once we've walked the stack all the way, so we know +// that all frames' bits are cleared by that point. +// +// - When the Debugger API evaluates an expression in some frame (the 'target +// frame'), it's SpiderMonkey's convention that the target frame be treated as +// the parent of the eval frame. In reality, of course, the eval frame is +// pushed on the top of the stack like any other frame, but stack captures +// simply jump straight over the intervening frames, so that the '.parent' +// property of a SavedFrame for the eval is the SavedFrame for the target. +// This is arranged by giving the eval frame an 'evalInFramePrev` link +// pointing to the target, which an ordinary FrameIter will notice and +// respect. +// +// If the LiveSavedFrameCache were presented with stack traversals that +// skipped frames in this way, it would cause havoc. 
First, with no debugger +// eval frames present, capture the stack, populating the cache. Then push a +// debugger eval frame and capture again; the skipped frames to appear to be +// absent from the stack. Now pop the debugger eval frame, and capture a third +// time: the no-longer-skipped frames seem to reappear on the stack, with +// their cached bits still set. +// +// The LiveSavedFrameCache assumes that the stack it sees is used in a +// stack-like fashion: if a frame has its bit set, it has never left the +// stack. To support this assumption, when the cache is in use, we do not skip +// the frames between a debugger eval frame an its target; we always traverse +// the entire stack, invalidating and populating the cache in the usual way. +// Instead, when we construct a SavedFrame for a debugger eval frame, we +// select the appropriate parent at that point: rather than the next-older +// frame, we find the SavedFrame for the eval's target frame. The skip appears +// in the SavedFrame chains, even as the traversal covers all the frames. +// +// - Rematerialized frames (see ../jit/RematerializedFrame.h) are always created +// with their hasCachedSavedFrame bits clear: although there may be extant +// SavedFrames built from the original IonMonkey frame, the Rematerialized +// frames will not have cache entries for them until they are traversed in a +// capture themselves. +// +// This means that, oddly, it is not always true that, once we reach a frame +// with its hasCachedSavedFrame bit set, all its parents will have the bit set +// as well. However, clear bits under younger set bits will only occur on +// Rematerialized frames. +class LiveSavedFrameCache { + public: + // The address of a live frame for which we can cache SavedFrames: it has a + // 'hasCachedSavedFrame' bit we can examine and set, and can be converted to + // a Key to index the cache. + class FramePtr { + // We use jit::CommonFrameLayout for both Baseline frames and Ion + // physical frames. 
+ using Ptr = mozilla::Variant; + + Ptr ptr; + + template + explicit FramePtr(Frame ptr) : ptr(ptr) {} + + struct HasCachedMatcher; + struct SetHasCachedMatcher; + struct ClearHasCachedMatcher; + + public: + // If iter's frame is of a type that can be cached, construct a FramePtr + // for its frame. Otherwise, return Nothing. + static inline mozilla::Maybe create(const FrameIter& iter); + + inline bool hasCachedSavedFrame() const; + inline void setHasCachedSavedFrame(); + inline void clearHasCachedSavedFrame(); + + // Return true if this FramePtr refers to an interpreter frame. + inline bool isInterpreterFrame() const { + return ptr.is(); + } + + // If this FramePtr is an interpreter frame, return a pointer to it. + inline InterpreterFrame& asInterpreterFrame() const { + return *ptr.as(); + } + + // Return true if this FramePtr refers to a rematerialized frame. + inline bool isRematerializedFrame() const { + return ptr.is(); + } + + bool operator==(const FramePtr& rhs) const { return rhs.ptr == this->ptr; } + bool operator!=(const FramePtr& rhs) const { return !(rhs == *this); } + }; + + private: + // A key in the cache: the address of a frame, live or dead, for which we + // can cache SavedFrames. Since the pointer may not be live, the only + // operation this type permits is comparison. 
+ class Key { + FramePtr framePtr; + + public: + MOZ_IMPLICIT Key(const FramePtr& framePtr) : framePtr(framePtr) {} + + bool operator==(const Key& rhs) const { + return rhs.framePtr == this->framePtr; + } + bool operator!=(const Key& rhs) const { return !(rhs == *this); } + }; + + struct Entry { + const Key key; + const jsbytecode* pc; + HeapPtr savedFrame; + + Entry(const Key& key, const jsbytecode* pc, SavedFrame* savedFrame) + : key(key), pc(pc), savedFrame(savedFrame) {} + }; + + using EntryVector = Vector; + EntryVector* frames; + + LiveSavedFrameCache(const LiveSavedFrameCache&) = delete; + LiveSavedFrameCache& operator=(const LiveSavedFrameCache&) = delete; + + public: + explicit LiveSavedFrameCache() : frames(nullptr) {} + + LiveSavedFrameCache(LiveSavedFrameCache&& rhs) : frames(rhs.frames) { + MOZ_ASSERT(this != &rhs, "self-move disallowed"); + rhs.frames = nullptr; + } + + ~LiveSavedFrameCache() { + if (frames) { + js_delete(frames); + frames = nullptr; + } + } + + bool initialized() const { return !!frames; } + bool init(JSContext* cx) { + frames = js_new(); + if (!frames) { + JS_ReportOutOfMemory(cx); + return false; + } + return true; + } + + void trace(JSTracer* trc); + + // Set |frame| to the cached SavedFrame corresponding to |framePtr| at |pc|. + // |framePtr|'s hasCachedSavedFrame bit must be set. Remove all cache + // entries for frames younger than that one. + // + // This may set |frame| to nullptr if |pc| is different from the pc supplied + // when the cache entry was inserted. In this case, the cached SavedFrame + // (probably) has the wrong source position. Entries for younger frames are + // still removed. The next frame, if any, will be a cache hit. + // + // This may also set |frame| to nullptr if the cache was populated with + // SavedFrame objects for a different compartment than cx's current + // compartment. In this case, the entire cache is flushed. 
+ void find(JSContext* cx, FramePtr& framePtr, const jsbytecode* pc, + MutableHandle frame) const; + + // Search the cache for a frame matching |framePtr|, without removing any + // entries. Return the matching saved frame, or nullptr if none is found. + // This is used for resolving |evalInFramePrev| links. + void findWithoutInvalidation(const FramePtr& framePtr, + MutableHandle frame) const; + + // Push a cache entry mapping |framePtr| and |pc| to |savedFrame| on the top + // of the cache's stack. You must insert entries for frames from oldest to + // youngest. They must all be younger than the frame that the |find| method + // found a hit for; or you must have cleared the entire cache with the + // |clear| method. + bool insert(JSContext* cx, FramePtr&& framePtr, const jsbytecode* pc, + Handle savedFrame); + + // Remove all entries from the cache. + void clear() { + if (frames) frames->clear(); + } +}; + +static_assert( + sizeof(LiveSavedFrameCache) == sizeof(uintptr_t), + "Every js::Activation has a LiveSavedFrameCache, so we need to be pretty " + "careful " + "about avoiding bloat. If you're adding members to LiveSavedFrameCache, " + "maybe you " + "should consider figuring out a way to make js::Activation have a " + "LiveSavedFrameCache* instead of a Rooted."); + +class Activation { + protected: + JSContext* cx_; + JS::Compartment* compartment_; + Activation* prev_; + Activation* prevProfiling_; + + // Counter incremented by JS::HideScriptedCaller and decremented by + // JS::UnhideScriptedCaller. If > 0 for the top activation, + // DescribeScriptedCaller will return null instead of querying that + // activation, which should prompt the caller to consult embedding-specific + // data structures instead. + size_t hideScriptedCallerCount_; + + // The cache of SavedFrame objects we have already captured when walking + // this activation's stack. 
+ JS::Rooted frameCache_; + + // Youngest saved frame of an async stack that will be iterated during stack + // capture in place of the actual stack of previous activations. Note that + // the stack of this activation is captured entirely before this is used. + // + // Usually this is nullptr, meaning that normal stack capture will occur. + // When this is set, the stack of any previous activation is ignored. + JS::Rooted asyncStack_; + + // Value of asyncCause to be attached to asyncStack_. + const char* asyncCause_; + + // True if the async call was explicitly requested, e.g. via + // callFunctionWithAsyncStack. + bool asyncCallIsExplicit_; + + enum Kind { Interpreter, Jit }; + Kind kind_; + + inline Activation(JSContext* cx, Kind kind); + inline ~Activation(); + + public: + JSContext* cx() const { return cx_; } + JS::Compartment* compartment() const { return compartment_; } + Activation* prev() const { return prev_; } + Activation* prevProfiling() const { return prevProfiling_; } + inline Activation* mostRecentProfiling(); + + bool isInterpreter() const { return kind_ == Interpreter; } + bool isJit() const { return kind_ == Jit; } + inline bool hasWasmExitFP() const; + + inline bool isProfiling() const; + void registerProfiling(); + void unregisterProfiling(); + + InterpreterActivation* asInterpreter() const { + MOZ_ASSERT(isInterpreter()); + return (InterpreterActivation*)this; + } + jit::JitActivation* asJit() const { + MOZ_ASSERT(isJit()); + return (jit::JitActivation*)this; + } + + void hideScriptedCaller() { hideScriptedCallerCount_++; } + void unhideScriptedCaller() { + MOZ_ASSERT(hideScriptedCallerCount_ > 0); + hideScriptedCallerCount_--; + } + bool scriptedCallerIsHidden() const { return hideScriptedCallerCount_ > 0; } + + SavedFrame* asyncStack() { return asyncStack_; } + + const char* asyncCause() const { return asyncCause_; } + + bool asyncCallIsExplicit() const { return asyncCallIsExplicit_; } + + inline LiveSavedFrameCache* 
getLiveSavedFrameCache(JSContext* cx); + void clearLiveSavedFrameCache() { frameCache_.get().clear(); } + + private: + Activation(const Activation& other) = delete; + void operator=(const Activation& other) = delete; +}; + +// This variable holds a special opcode value which is greater than all normal +// opcodes, and is chosen such that the bitwise or of this value with any +// opcode is this value. +constexpr jsbytecode EnableInterruptsPseudoOpcode = -1; + +static_assert(EnableInterruptsPseudoOpcode >= JSOP_LIMIT, + "EnableInterruptsPseudoOpcode must be greater than any opcode"); +static_assert( + EnableInterruptsPseudoOpcode == jsbytecode(-1), + "EnableInterruptsPseudoOpcode must be the maximum jsbytecode value"); + +class InterpreterFrameIterator; +class RunState; + +class InterpreterActivation : public Activation { + friend class js::InterpreterFrameIterator; + + InterpreterRegs regs_; + InterpreterFrame* entryFrame_; + size_t opMask_; // For debugger interrupts, see js::Interpret. + +#ifdef DEBUG + size_t oldFrameCount_; +#endif + + public: + inline InterpreterActivation(RunState& state, JSContext* cx, + InterpreterFrame* entryFrame); + inline ~InterpreterActivation(); + + inline bool pushInlineFrame(const JS::CallArgs& args, + JS::Handle script, + MaybeConstruct constructing); + inline void popInlineFrame(InterpreterFrame* frame); + + inline bool resumeGeneratorFrame(JS::Handle callee, + JS::Handle envChain); + + InterpreterFrame* current() const { return regs_.fp(); } + InterpreterRegs& regs() { return regs_; } + InterpreterFrame* entryFrame() const { return entryFrame_; } + size_t opMask() const { return opMask_; } + + bool isProfiling() const { return false; } + + // If this js::Interpret frame is running |script|, enable interrupts. 
+  void enableInterruptsIfRunning(JSScript* script) {
+    if (regs_.fp()->script() == script) {
+      enableInterruptsUnconditionally();
+    }
+  }
+  void enableInterruptsUnconditionally() {
+    opMask_ = EnableInterruptsPseudoOpcode;
+  }
+  // Restore the normal (no pseudo-opcode) dispatch mask.
+  void clearInterruptsMask() { opMask_ = 0; }
+};
+
+// Iterates over a thread's activation list.
+class ActivationIterator {
+ protected:
+  // Current position in the list; nullptr once iteration is done.
+  Activation* activation_;
+
+ public:
+  explicit ActivationIterator(JSContext* cx);
+
+  ActivationIterator& operator++();
+
+  Activation* operator->() const { return activation_; }
+  Activation* activation() const { return activation_; }
+  bool done() const { return activation_ == nullptr; }
+};
+
+}  // namespace js
+
+#endif  // vm_Activation_h
diff --git a/js/src/vm/ArgumentsObject-inl.h b/js/src/vm/ArgumentsObject-inl.h
new file mode 100644
index 0000000000..4ef7493f79
--- /dev/null
+++ b/js/src/vm/ArgumentsObject-inl.h
@@ -0,0 +1,58 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/ + +#ifndef vm_ArgumentsObject_inl_h +#define vm_ArgumentsObject_inl_h + +#include "vm/ArgumentsObject.h" + +#include "vm/EnvironmentObject.h" + +#include "vm/EnvironmentObject-inl.h" + +namespace js { + +inline const Value& ArgumentsObject::element(uint32_t i) const { + MOZ_ASSERT(isElement(i)); + const Value& v = data()->args[i]; + if (IsMagicScopeSlotValue(v)) { + CallObject& callobj = + getFixedSlot(MAYBE_CALL_SLOT).toObject().as(); + return callobj.aliasedFormalFromArguments(v); + } + return v; +} + +inline void ArgumentsObject::setElement(uint32_t i, const Value& v) { + MOZ_ASSERT(isElement(i)); + GCPtr& lhs = data()->args[i]; + if (IsMagicScopeSlotValue(lhs)) { + CallObject& callobj = + getFixedSlot(MAYBE_CALL_SLOT).toObject().as(); + callobj.setAliasedFormalFromArguments(lhs, v); + } else { + lhs = v; + } +} + +inline bool ArgumentsObject::maybeGetElements(uint32_t start, uint32_t count, + Value* vp) { + MOZ_ASSERT(start + count >= start); + + uint32_t length = initialLength(); + if (start > length || start + count > length || hasOverriddenElement()) { + return false; + } + + for (uint32_t i = start, end = start + count; i < end; ++i, ++vp) { + *vp = element(i); + } + return true; +} + +} /* namespace js */ + +#endif /* vm_ArgumentsObject_inl_h */ diff --git a/js/src/vm/ArgumentsObject.cpp b/js/src/vm/ArgumentsObject.cpp new file mode 100644 index 0000000000..3a058e0df8 --- /dev/null +++ b/js/src/vm/ArgumentsObject.cpp @@ -0,0 +1,1182 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/ArgumentsObject-inl.h" + +#include "mozilla/Maybe.h" +#include "mozilla/PodOperations.h" + +#include + +#include "gc/GCContext.h" +#include "jit/CalleeToken.h" +#include "jit/JitFrames.h" +#include "util/BitArray.h" +#include "vm/GlobalObject.h" +#include "vm/Stack.h" +#include "vm/WellKnownAtom.h" // js_*_str + +#include "gc/Nursery-inl.h" +#include "vm/FrameIter-inl.h" // js::FrameIter::unaliasedForEachActual +#include "vm/NativeObject-inl.h" +#include "vm/Stack-inl.h" + +using namespace js; + +/* static */ +size_t RareArgumentsData::bytesRequired(size_t numActuals) { + size_t extraBytes = NumWordsForBitArrayOfLength(numActuals) * sizeof(size_t); + return offsetof(RareArgumentsData, deletedBits_) + extraBytes; +} + +/* static */ +RareArgumentsData* RareArgumentsData::create(JSContext* cx, + ArgumentsObject* obj) { + size_t bytes = RareArgumentsData::bytesRequired(obj->initialLength()); + + uint8_t* data = AllocateObjectBuffer(cx, obj, bytes); + if (!data) { + return nullptr; + } + + mozilla::PodZero(data, bytes); + + AddCellMemory(obj, bytes, MemoryUse::RareArgumentsData); + + return new (data) RareArgumentsData(); +} + +bool ArgumentsObject::createRareData(JSContext* cx) { + MOZ_ASSERT(!data()->rareData); + + RareArgumentsData* rareData = RareArgumentsData::create(cx, this); + if (!rareData) { + return false; + } + + data()->rareData = rareData; + markElementOverridden(); + return true; +} + +bool ArgumentsObject::markElementDeleted(JSContext* cx, uint32_t i) { + RareArgumentsData* data = getOrCreateRareData(cx); + if (!data) { + return false; + } + + data->markElementDeleted(initialLength(), i); + return true; +} + +/* static */ +void ArgumentsObject::MaybeForwardToCallObject(AbstractFramePtr frame, + ArgumentsObject* obj, + ArgumentsData* data) { + JSScript* script = frame.script(); + if (frame.callee()->needsCallObject() && script->argsObjAliasesFormals()) { + obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(frame.callObj())); + for 
(PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.closedOver()) { + data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot()); + obj->markArgumentForwarded(); + } + } + } +} + +/* static */ +void ArgumentsObject::MaybeForwardToCallObject(JSFunction* callee, + JSObject* callObj, + ArgumentsObject* obj, + ArgumentsData* data) { + JSScript* script = callee->nonLazyScript(); + if (callee->needsCallObject() && script->argsObjAliasesFormals()) { + MOZ_ASSERT(callObj && callObj->is()); + obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(*callObj)); + for (PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.closedOver()) { + data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot()); + obj->markArgumentForwarded(); + } + } + } +} + +struct CopyFrameArgs { + AbstractFramePtr frame_; + + explicit CopyFrameArgs(AbstractFramePtr frame) : frame_(frame) {} + + void copyActualArgs(GCPtr* dst, unsigned numActuals) const { + MOZ_ASSERT_IF(frame_.isInterpreterFrame(), + !frame_.asInterpreterFrame()->runningInJit()); + + // Copy arguments. + Value* src = frame_.argv(); + Value* end = src + numActuals; + while (src != end) { + (dst++)->init(*src++); + } + } + + /* + * If a call object exists and the arguments object aliases formals, the + * call object is the canonical location for formals. 
+   */
+  void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+    ArgumentsObject::MaybeForwardToCallObject(frame_, obj, data);
+  }
+};
+
+struct CopyJitFrameArgs {
+  jit::JitFrameLayout* frame_;
+  HandleObject callObj_;
+
+  CopyJitFrameArgs(jit::JitFrameLayout* frame, HandleObject callObj)
+      : frame_(frame), callObj_(callObj) {}
+
+  void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+    MOZ_ASSERT(frame_->numActualArgs() == numActuals);
+
+    Value* src = frame_->actualArgs();
+    Value* end = src + numActuals;
+    while (src != end) {
+      (dst++)->init(*src++);
+    }
+  }
+
+  /*
+   * If a call object exists and the arguments object aliases formals, the
+   * call object is the canonical location for formals.
+   */
+  void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+    JSFunction* callee = jit::CalleeTokenToFunction(frame_->calleeToken());
+    ArgumentsObject::MaybeForwardToCallObject(callee, callObj_, obj, data);
+  }
+};
+
+struct CopyScriptFrameIterArgs {
+  ScriptFrameIter& iter_;
+  RootedValueVector actualArgs_;
+
+  explicit CopyScriptFrameIterArgs(JSContext* cx, ScriptFrameIter& iter)
+      : iter_(iter), actualArgs_(cx) {}
+
+  // Used to copy arguments to actualArgs_ to simplify copyArgs and
+  // ArgumentsObject allocation.
+  [[nodiscard]] bool init(JSContext* cx) {
+    unsigned numActuals = iter_.numActualArgs();
+    if (!actualArgs_.reserve(numActuals)) {
+      return false;
+    }
+
+    // Append actual arguments.
+    iter_.unaliasedForEachActual(
+        cx, [this](const Value& v) { actualArgs_.infallibleAppend(v); });
+    MOZ_RELEASE_ASSERT(actualArgs_.length() == numActuals);
+    return true;
+  }
+
+  void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+    MOZ_ASSERT(actualArgs_.length() == numActuals);
+
+    for (Value v : actualArgs_) {
+      (dst++)->init(v);
+    }
+  }
+
+  /*
+   * Ion frames are copying every argument onto the stack, other locations are
+   * invalid.
+   */
+  void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+    if (!iter_.isIon()) {
+      ArgumentsObject::MaybeForwardToCallObject(iter_.abstractFramePtr(), obj,
+                                                data);
+    }
+  }
+};
+
+struct CopyInlinedArgs {
+  HandleValueArray args_;
+  HandleObject callObj_;
+  HandleFunction callee_;
+
+  CopyInlinedArgs(HandleValueArray args, HandleObject callObj,
+                  HandleFunction callee)
+      : args_(args), callObj_(callObj), callee_(callee) {}
+
+  void copyActualArgs(GCPtr<Value>* dst, unsigned numActuals) const {
+    MOZ_ASSERT(numActuals <= args_.length());
+
+    for (uint32_t i = 0; i < numActuals; i++) {
+      (dst++)->init(args_[i]);
+    }
+  }
+
+  /*
+   * If a call object exists and the arguments object aliases formals, the
+   * call object is the canonical location for formals.
+   */
+  void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) {
+    ArgumentsObject::MaybeForwardToCallObject(callee_, callObj_, obj, data);
+  }
+};
+
+// Create the shared template object from which mapped/unmapped arguments
+// objects are cloned. The template carries the shape but a null data slot.
+ArgumentsObject* ArgumentsObject::createTemplateObject(JSContext* cx,
+                                                       bool mapped) {
+  const JSClass* clasp = mapped ? &MappedArgumentsObject::class_
+                                : &UnmappedArgumentsObject::class_;
+
+  RootedObject proto(cx, &cx->global()->getObjectPrototype());
+
+  constexpr ObjectFlags objectFlags = {ObjectFlag::Indexed};
+  Rooted<SharedShape*> shape(
+      cx, SharedShape::getInitialShape(cx, clasp, cx->realm(),
+                                       TaggedProto(proto), FINALIZE_KIND,
+                                       objectFlags));
+  if (!shape) {
+    return nullptr;
+  }
+
+  AutoSetNewObjectMetadata metadata(cx);
+  JSObject* base =
+      NativeObject::create(cx, FINALIZE_KIND, gc::Heap::Tenured, shape);
+  if (!base) {
+    return nullptr;
+  }
+
+  ArgumentsObject* obj = &base->as<ArgumentsObject>();
+  obj->initFixedSlot(ArgumentsObject::DATA_SLOT, PrivateValue(nullptr));
+  return obj;
+}
+
+ArgumentsObject* GlobalObject::maybeArgumentsTemplateObject(bool mapped) const {
+  return mapped ?
data().mappedArgumentsTemplate
+                : data().unmappedArgumentsTemplate;
+}
+
+/* static */
+ArgumentsObject* GlobalObject::getOrCreateArgumentsTemplateObject(JSContext* cx,
+                                                                  bool mapped) {
+  GlobalObjectData& data = cx->global()->data();
+  HeapPtr<ArgumentsObject*>& obj =
+      mapped ? data.mappedArgumentsTemplate : data.unmappedArgumentsTemplate;
+
+  ArgumentsObject* templateObj = obj;
+  if (templateObj) {
+    return templateObj;
+  }
+
+  templateObj = ArgumentsObject::createTemplateObject(cx, mapped);
+  if (!templateObj) {
+    return nullptr;
+  }
+
+  obj.init(templateObj);
+  return templateObj;
+}
+
+template <typename CopyArgs>
+/* static */
+ArgumentsObject* ArgumentsObject::create(JSContext* cx, HandleFunction callee,
+                                         unsigned numActuals, CopyArgs& copy) {
+  // Self-hosted code should use the more efficient ArgumentsLength and
+  // GetArgument intrinsics instead of `arguments`.
+  MOZ_ASSERT(!callee->isSelfHostedBuiltin());
+
+  bool mapped = callee->baseScript()->hasMappedArgsObj();
+  ArgumentsObject* templateObj =
+      GlobalObject::getOrCreateArgumentsTemplateObject(cx, mapped);
+  if (!templateObj) {
+    return nullptr;
+  }
+
+  Rooted<SharedShape*> shape(cx, templateObj->sharedShape());
+
+  unsigned numFormals = callee->nargs();
+  unsigned numArgs = std::max(numActuals, numFormals);
+  unsigned numBytes = ArgumentsData::bytesRequired(numArgs);
+
+  AutoSetNewObjectMetadata metadata(cx);
+  JSObject* base =
+      NativeObject::create(cx, FINALIZE_KIND, gc::Heap::Default, shape);
+  if (!base) {
+    return nullptr;
+  }
+  ArgumentsObject* obj = &base->as<ArgumentsObject>();
+
+  ArgumentsData* data = reinterpret_cast<ArgumentsData*>(
+      AllocateObjectBuffer<uint8_t>(cx, obj, numBytes));
+  if (!data) {
+    // Make the object safe for GC.
+    obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr));
+    return nullptr;
+  }
+
+  data->numArgs = numArgs;
+  data->rareData = nullptr;
+
+  InitReservedSlot(obj, DATA_SLOT, data, numBytes, MemoryUse::ArgumentsData);
+  obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee));
+  obj->initFixedSlot(INITIAL_LENGTH_SLOT,
+                     Int32Value(numActuals << PACKED_BITS_COUNT));
+
+  // Copy [0, numActuals) into data->args.
+  GCPtr<Value>* args = data->args;
+  copy.copyActualArgs(args, numActuals);
+
+  // Fill in missing arguments with |undefined|.
+  for (size_t i = numActuals; i < numArgs; i++) {
+    args[i].init(UndefinedValue());
+  }
+
+  copy.maybeForwardToCallObject(obj, data);
+
+  MOZ_ASSERT(obj->initialLength() == numActuals);
+  MOZ_ASSERT(!obj->hasOverriddenLength());
+  return obj;
+}
+
+ArgumentsObject* ArgumentsObject::createExpected(JSContext* cx,
+                                                 AbstractFramePtr frame) {
+  MOZ_ASSERT(frame.script()->needsArgsObj());
+  RootedFunction callee(cx, frame.callee());
+  CopyFrameArgs copy(frame);
+  ArgumentsObject* argsobj = create(cx, callee, frame.numActualArgs(), copy);
+  if (!argsobj) {
+    return nullptr;
+  }
+
+  frame.initArgsObj(*argsobj);
+  return argsobj;
+}
+
+ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx,
+                                                   ScriptFrameIter& iter) {
+  RootedFunction callee(cx, iter.callee(cx));
+  CopyScriptFrameIterArgs copy(cx, iter);
+  if (!copy.init(cx)) {
+    return nullptr;
+  }
+  return create(cx, callee, iter.numActualArgs(), copy);
+}
+
+ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx,
+                                                   AbstractFramePtr frame) {
+  RootedFunction callee(cx, frame.callee());
+  CopyFrameArgs copy(frame);
+  return create(cx, callee, frame.numActualArgs(), copy);
+}
+
+ArgumentsObject* ArgumentsObject::createForIon(JSContext* cx,
+                                               jit::JitFrameLayout* frame,
+                                               HandleObject scopeChain) {
+  jit::CalleeToken token = frame->calleeToken();
+  MOZ_ASSERT(jit::CalleeTokenIsFunction(token));
+  RootedFunction callee(cx, jit::CalleeTokenToFunction(token));
+  RootedObject callObj(
+      cx,
scopeChain->is() ? scopeChain.get() : nullptr); + CopyJitFrameArgs copy(frame, callObj); + return create(cx, callee, frame->numActualArgs(), copy); +} + +/* static */ +ArgumentsObject* ArgumentsObject::createFromValueArray( + JSContext* cx, HandleValueArray argsArray, HandleFunction callee, + HandleObject scopeChain, uint32_t numActuals) { + MOZ_ASSERT(numActuals <= MaxInlinedArgs); + RootedObject callObj( + cx, scopeChain->is() ? scopeChain.get() : nullptr); + CopyInlinedArgs copy(argsArray, callObj, callee); + return create(cx, callee, numActuals, copy); +} + +/* static */ +ArgumentsObject* ArgumentsObject::createForInlinedIon(JSContext* cx, + Value* args, + HandleFunction callee, + HandleObject scopeChain, + uint32_t numActuals) { + RootedExternalValueArray rootedArgs(cx, numActuals, args); + HandleValueArray argsArray = + HandleValueArray::fromMarkedLocation(numActuals, args); + + return createFromValueArray(cx, argsArray, callee, scopeChain, numActuals); +} + +template +/* static */ +ArgumentsObject* ArgumentsObject::finishPure( + JSContext* cx, ArgumentsObject* obj, JSFunction* callee, JSObject* callObj, + unsigned numActuals, CopyArgs& copy) { + unsigned numFormals = callee->nargs(); + unsigned numArgs = std::max(numActuals, numFormals); + unsigned numBytes = ArgumentsData::bytesRequired(numArgs); + + ArgumentsData* data = reinterpret_cast( + AllocateObjectBuffer(cx, obj, numBytes)); + if (!data) { + // Make the object safe for GC. Don't report OOM, the slow path will + // retry the allocation. 
+ cx->recoverFromOutOfMemory(); + obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr)); + return nullptr; + } + + data->numArgs = numArgs; + data->rareData = nullptr; + + obj->initFixedSlot(INITIAL_LENGTH_SLOT, + Int32Value(numActuals << PACKED_BITS_COUNT)); + obj->initFixedSlot(DATA_SLOT, PrivateValue(data)); + AddCellMemory(obj, numBytes, MemoryUse::ArgumentsData); + obj->initFixedSlot(MAYBE_CALL_SLOT, UndefinedValue()); + obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee)); + + GCPtr* args = data->args; + copy.copyActualArgs(args, numActuals); + + // Fill in missing arguments with |undefined|. + for (size_t i = numActuals; i < numArgs; i++) { + args[i].init(UndefinedValue()); + } + + if (callObj && callee->needsCallObject()) { + copy.maybeForwardToCallObject(obj, data); + } + + MOZ_ASSERT(obj->initialLength() == numActuals); + MOZ_ASSERT(!obj->hasOverriddenLength()); + return obj; +} + +/* static */ +ArgumentsObject* ArgumentsObject::finishForIonPure(JSContext* cx, + jit::JitFrameLayout* frame, + JSObject* scopeChain, + ArgumentsObject* obj) { + // JIT code calls this directly (no callVM), because it's faster, so we're + // not allowed to GC in here. + AutoUnsafeCallWithABI unsafe; + + JSFunction* callee = jit::CalleeTokenToFunction(frame->calleeToken()); + RootedObject callObj(cx, scopeChain->is() ? scopeChain : nullptr); + CopyJitFrameArgs copy(frame, callObj); + + unsigned numActuals = frame->numActualArgs(); + + return finishPure(cx, obj, callee, callObj, numActuals, copy); +} + +/* static */ +ArgumentsObject* ArgumentsObject::finishInlineForIonPure( + JSContext* cx, JSObject* rawCallObj, JSFunction* rawCallee, Value* args, + uint32_t numActuals, ArgumentsObject* obj) { + // JIT code calls this directly (no callVM), because it's faster, so we're + // not allowed to GC in here. 
+ AutoUnsafeCallWithABI unsafe; + + MOZ_ASSERT(numActuals <= MaxInlinedArgs); + + RootedObject callObj(cx, rawCallObj); + RootedFunction callee(cx, rawCallee); + RootedExternalValueArray rootedArgs(cx, numActuals, args); + HandleValueArray argsArray = + HandleValueArray::fromMarkedLocation(numActuals, args); + + CopyInlinedArgs copy(argsArray, callObj, callee); + + return finishPure(cx, obj, callee, callObj, numActuals, copy); +} + +/* static */ +bool ArgumentsObject::obj_delProperty(JSContext* cx, HandleObject obj, + HandleId id, ObjectOpResult& result) { + ArgumentsObject& argsobj = obj->as(); + if (id.isInt()) { + unsigned arg = unsigned(id.toInt()); + if (argsobj.isElement(arg)) { + if (!argsobj.markElementDeleted(cx, arg)) { + return false; + } + } + } else if (id.isAtom(cx->names().length)) { + argsobj.markLengthOverridden(); + } else if (id.isAtom(cx->names().callee)) { + argsobj.as().markCalleeOverridden(); + } else if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) { + argsobj.markIteratorOverridden(); + } + return result.succeed(); +} + +/* static */ +bool ArgumentsObject::obj_mayResolve(const JSAtomState& names, jsid id, + JSObject*) { + // Arguments might resolve indexes, Symbol.iterator, or length/callee. + if (id.isAtom()) { + JSAtom* atom = id.toAtom(); + return atom->isIndex() || atom == names.length || atom == names.callee; + } + + return id.isInt() || id.isWellKnownSymbol(JS::SymbolCode::iterator); +} + +bool js::MappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp) { + MappedArgumentsObject& argsobj = obj->as(); + if (id.isInt()) { + /* + * arg can exceed the number of arguments if a script changed the + * prototype to point to another Arguments object with a bigger argc. 
+ */ + unsigned arg = unsigned(id.toInt()); + if (argsobj.isElement(arg)) { + vp.set(argsobj.element(arg)); + } + } else if (id.isAtom(cx->names().length)) { + if (!argsobj.hasOverriddenLength()) { + vp.setInt32(argsobj.initialLength()); + } + } else { + MOZ_ASSERT(id.isAtom(cx->names().callee)); + if (!argsobj.hasOverriddenCallee()) { + vp.setObject(argsobj.callee()); + } + } + return true; +} + +bool js::MappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result) { + Handle argsobj = obj.as(); + + Rooted> desc(cx); + if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) { + return false; + } + MOZ_ASSERT(desc.isSome()); + MOZ_ASSERT(desc->isDataDescriptor()); + MOZ_ASSERT(desc->writable()); + MOZ_ASSERT(!desc->resolving()); + + if (id.isInt()) { + unsigned arg = unsigned(id.toInt()); + if (argsobj->isElement(arg)) { + argsobj->setElement(arg, v); + return result.succeed(); + } + } else { + MOZ_ASSERT(id.isAtom(cx->names().length) || id.isAtom(cx->names().callee)); + } + + /* + * For simplicity we use delete/define to replace the property with a + * simple data property. Note that we rely on ArgumentsObject::obj_delProperty + * to set the corresponding override-bit. + * Note also that we must define the property instead of setting it in case + * the user has changed the prototype to an object that has a setter for + * this id. 
+ */ + Rooted desc_(cx, *desc); + desc_.setValue(v); + ObjectOpResult ignored; + return NativeDeleteProperty(cx, argsobj, id, ignored) && + NativeDefineProperty(cx, argsobj, id, desc_, result); +} + +/* static */ +bool ArgumentsObject::getArgumentsIterator(JSContext* cx, + MutableHandleValue val) { + Handle shName = cx->names().ArrayValues; + Rooted name(cx, cx->names().values); + return GlobalObject::getSelfHostedFunction(cx, cx->global(), shName, name, 0, + val); +} + +/* static */ +bool ArgumentsObject::reifyLength(JSContext* cx, Handle obj) { + if (obj->hasOverriddenLength()) { + return true; + } + + RootedId id(cx, NameToId(cx->names().length)); + RootedValue val(cx, Int32Value(obj->initialLength())); + if (!NativeDefineDataProperty(cx, obj, id, val, JSPROP_RESOLVING)) { + return false; + } + + obj->markLengthOverridden(); + return true; +} + +/* static */ +bool ArgumentsObject::reifyIterator(JSContext* cx, + Handle obj) { + if (obj->hasOverriddenIterator()) { + return true; + } + + RootedId iteratorId(cx, PropertyKey::Symbol(cx->wellKnownSymbols().iterator)); + RootedValue val(cx); + if (!ArgumentsObject::getArgumentsIterator(cx, &val)) { + return false; + } + if (!NativeDefineDataProperty(cx, obj, iteratorId, val, JSPROP_RESOLVING)) { + return false; + } + + obj->markIteratorOverridden(); + return true; +} + +/* static */ +bool MappedArgumentsObject::reifyCallee(JSContext* cx, + Handle obj) { + if (obj->hasOverriddenCallee()) { + return true; + } + + Rooted key(cx, NameToId(cx->names().callee)); + Rooted val(cx, ObjectValue(obj->callee())); + if (!NativeDefineDataProperty(cx, obj, key, val, JSPROP_RESOLVING)) { + return false; + } + + obj->markCalleeOverridden(); + return true; +} + +static bool ResolveArgumentsProperty(JSContext* cx, + Handle obj, HandleId id, + PropertyFlags flags, bool* resolvedp) { + MOZ_ASSERT(id.isInt() || id.isAtom(cx->names().length) || + id.isAtom(cx->names().callee)); + MOZ_ASSERT(flags.isCustomDataProperty()); + + if 
(!NativeObject::addCustomDataProperty(cx, obj, id, flags)) { + return false; + } + + *resolvedp = true; + return true; +} + +/* static */ +bool MappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj, + HandleId id, bool* resolvedp) { + Rooted argsobj(cx, &obj->as()); + + if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) { + if (argsobj->hasOverriddenIterator()) { + return true; + } + + if (!reifyIterator(cx, argsobj)) { + return false; + } + *resolvedp = true; + return true; + } + + PropertyFlags flags = {PropertyFlag::CustomDataProperty, + PropertyFlag::Configurable, PropertyFlag::Writable}; + if (id.isInt()) { + uint32_t arg = uint32_t(id.toInt()); + if (!argsobj->isElement(arg)) { + return true; + } + + flags.setFlag(PropertyFlag::Enumerable); + } else if (id.isAtom(cx->names().length)) { + if (argsobj->hasOverriddenLength()) { + return true; + } + } else { + if (!id.isAtom(cx->names().callee)) { + return true; + } + + if (argsobj->hasOverriddenCallee()) { + return true; + } + } + + return ResolveArgumentsProperty(cx, argsobj, id, flags, resolvedp); +} + +/* static */ +bool MappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) { + Rooted argsobj(cx, &obj->as()); + + RootedId id(cx); + bool found; + + // Trigger reflection. 
+ id = NameToId(cx->names().length); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = NameToId(cx->names().callee); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = PropertyKey::Symbol(cx->wellKnownSymbols().iterator); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + for (unsigned i = 0; i < argsobj->initialLength(); i++) { + id = PropertyKey::Int(i); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + } + + return true; +} + +static bool DefineMappedIndex(JSContext* cx, Handle obj, + HandleId id, + MutableHandle desc, + ObjectOpResult& result) { + // The custom data properties (see MappedArgGetter, MappedArgSetter) have to + // be (re)defined manually because PropertyDescriptor and NativeDefineProperty + // don't support these special properties. + // + // This exists in order to let JS code change the configurable/enumerable + // attributes for these properties. + // + // Note: because this preserves the default mapped-arguments behavior, we + // don't need to mark elements as overridden or deleted. + + MOZ_ASSERT(id.isInt()); + MOZ_ASSERT(obj->isElement(id.toInt())); + MOZ_ASSERT(!obj->containsDenseElement(id.toInt())); + + MOZ_ASSERT(!desc.isAccessorDescriptor()); + + // Mapped properties aren't used when defining a non-writable property. + MOZ_ASSERT(!desc.hasWritable() || desc.writable()); + + // First, resolve the property to simplify the code below. + PropertyResult prop; + if (!NativeLookupOwnProperty(cx, obj, id, &prop)) { + return false; + } + + MOZ_ASSERT(prop.isNativeProperty()); + + PropertyInfo propInfo = prop.propertyInfo(); + MOZ_ASSERT(propInfo.writable()); + MOZ_ASSERT(propInfo.isCustomDataProperty()); + + // Change the property's attributes by implementing the relevant parts of + // ValidateAndApplyPropertyDescriptor (ES2021 draft, 10.1.6.3), in particular + // steps 4 and 9. 
+ + // Determine whether the property should be configurable and/or enumerable. + bool configurable = propInfo.configurable(); + bool enumerable = propInfo.enumerable(); + if (configurable) { + if (desc.hasConfigurable()) { + configurable = desc.configurable(); + } + if (desc.hasEnumerable()) { + enumerable = desc.enumerable(); + } + } else { + // Property is not configurable so disallow any attribute changes. + if ((desc.hasConfigurable() && desc.configurable()) || + (desc.hasEnumerable() && enumerable != desc.enumerable())) { + return result.fail(JSMSG_CANT_REDEFINE_PROP); + } + } + + PropertyFlags flags = propInfo.flags(); + flags.setFlag(PropertyFlag::Configurable, configurable); + flags.setFlag(PropertyFlag::Enumerable, enumerable); + if (!NativeObject::changeCustomDataPropAttributes(cx, obj, id, flags)) { + return false; + } + + return result.succeed(); +} + +// ES 2017 draft 9.4.4.2 +/* static */ +bool MappedArgumentsObject::obj_defineProperty(JSContext* cx, HandleObject obj, + HandleId id, + Handle desc, + ObjectOpResult& result) { + // Step 1. + Rooted argsobj(cx, &obj->as()); + + // Steps 2-3. + bool isMapped = false; + if (id.isInt()) { + unsigned arg = unsigned(id.toInt()); + isMapped = argsobj->isElement(arg); + } + + // Step 4. + Rooted newArgDesc(cx, desc); + + // Step 5. + bool defineMapped = false; + if (!desc.isAccessorDescriptor() && isMapped) { + // Step 5.a. + if (desc.hasWritable() && !desc.writable()) { + if (!desc.hasValue()) { + RootedValue v(cx, argsobj->element(id.toInt())); + newArgDesc.setValue(v); + } + } else { + // In this case the live mapping is supposed to keep working. + defineMapped = true; + } + } + + // Step 6. NativeDefineProperty will lookup [[Value]] for us. + if (defineMapped) { + if (!DefineMappedIndex(cx, argsobj, id, &newArgDesc, result)) { + return false; + } + } else { + if (!NativeDefineProperty(cx, obj.as(), id, newArgDesc, + result)) { + return false; + } + } + // Step 7. 
+ if (!result.ok()) { + return true; + } + + // Step 8. + if (isMapped) { + unsigned arg = unsigned(id.toInt()); + if (desc.isAccessorDescriptor()) { + if (!argsobj->markElementDeleted(cx, arg)) { + return false; + } + } else { + if (desc.hasValue()) { + argsobj->setElement(arg, desc.value()); + } + if (desc.hasWritable() && !desc.writable()) { + if (!argsobj->markElementDeleted(cx, arg)) { + return false; + } + } + } + } + + // Step 9. + return result.succeed(); +} + +bool js::UnmappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp) { + UnmappedArgumentsObject& argsobj = obj->as(); + + if (id.isInt()) { + /* + * arg can exceed the number of arguments if a script changed the + * prototype to point to another Arguments object with a bigger argc. + */ + unsigned arg = unsigned(id.toInt()); + if (argsobj.isElement(arg)) { + vp.set(argsobj.element(arg)); + } + } else { + MOZ_ASSERT(id.isAtom(cx->names().length)); + if (!argsobj.hasOverriddenLength()) { + vp.setInt32(argsobj.initialLength()); + } + } + return true; +} + +bool js::UnmappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result) { + Handle argsobj = obj.as(); + + Rooted> desc(cx); + if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) { + return false; + } + MOZ_ASSERT(desc.isSome()); + MOZ_ASSERT(desc->isDataDescriptor()); + MOZ_ASSERT(desc->writable()); + MOZ_ASSERT(!desc->resolving()); + + if (id.isInt()) { + unsigned arg = unsigned(id.toInt()); + if (arg < argsobj->initialLength()) { + argsobj->setElement(arg, v); + return result.succeed(); + } + } else { + MOZ_ASSERT(id.isAtom(cx->names().length)); + } + + /* + * For simplicity we use delete/define to replace the property with a + * simple data property. Note that we rely on ArgumentsObject::obj_delProperty + * to set the corresponding override-bit. 
+ */ + Rooted desc_(cx, *desc); + desc_.setValue(v); + ObjectOpResult ignored; + return NativeDeleteProperty(cx, argsobj, id, ignored) && + NativeDefineProperty(cx, argsobj, id, desc_, result); +} + +/* static */ +bool UnmappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj, + HandleId id, bool* resolvedp) { + Rooted argsobj(cx, + &obj->as()); + + if (id.isWellKnownSymbol(JS::SymbolCode::iterator)) { + if (argsobj->hasOverriddenIterator()) { + return true; + } + + if (!reifyIterator(cx, argsobj)) { + return false; + } + *resolvedp = true; + return true; + } + + if (id.isAtom(cx->names().callee)) { + RootedObject throwTypeError( + cx, GlobalObject::getOrCreateThrowTypeError(cx, cx->global())); + if (!throwTypeError) { + return false; + } + + unsigned attrs = JSPROP_RESOLVING | JSPROP_PERMANENT; + if (!NativeDefineAccessorProperty(cx, argsobj, id, throwTypeError, + throwTypeError, attrs)) { + return false; + } + + *resolvedp = true; + return true; + } + + PropertyFlags flags = {PropertyFlag::CustomDataProperty, + PropertyFlag::Configurable, PropertyFlag::Writable}; + if (id.isInt()) { + uint32_t arg = uint32_t(id.toInt()); + if (!argsobj->isElement(arg)) { + return true; + } + + flags.setFlag(PropertyFlag::Enumerable); + } else if (id.isAtom(cx->names().length)) { + if (argsobj->hasOverriddenLength()) { + return true; + } + } else { + return true; + } + + return ResolveArgumentsProperty(cx, argsobj, id, flags, resolvedp); +} + +/* static */ +bool UnmappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) { + Rooted argsobj(cx, + &obj->as()); + + RootedId id(cx); + bool found; + + // Trigger reflection. 
+ id = NameToId(cx->names().length); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = NameToId(cx->names().callee); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = PropertyKey::Symbol(cx->wellKnownSymbols().iterator); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + for (unsigned i = 0; i < argsobj->initialLength(); i++) { + id = PropertyKey::Int(i); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + } + + return true; +} + +void ArgumentsObject::finalize(JS::GCContext* gcx, JSObject* obj) { + MOZ_ASSERT(!IsInsideNursery(obj)); + ArgumentsObject& argsobj = obj->as(); + if (argsobj.data()) { + gcx->free_(&argsobj, argsobj.maybeRareData(), + RareArgumentsData::bytesRequired(argsobj.initialLength()), + MemoryUse::RareArgumentsData); + gcx->free_(&argsobj, argsobj.data(), + ArgumentsData::bytesRequired(argsobj.data()->numArgs), + MemoryUse::ArgumentsData); + } +} + +void ArgumentsObject::trace(JSTracer* trc, JSObject* obj) { + ArgumentsObject& argsobj = obj->as(); + if (ArgumentsData* data = + argsobj.data()) { // Template objects have no ArgumentsData. 
+ TraceRange(trc, data->numArgs, data->begin(), js_arguments_str); + } +} + +/* static */ +size_t ArgumentsObject::objectMoved(JSObject* dst, JSObject* src) { + ArgumentsObject* ndst = &dst->as(); + const ArgumentsObject* nsrc = &src->as(); + MOZ_ASSERT(ndst->data() == nsrc->data()); + + if (!IsInsideNursery(src)) { + return 0; + } + + Nursery& nursery = dst->runtimeFromMainThread()->gc.nursery(); + + size_t nbytesTotal = 0; + uint32_t nDataBytes = ArgumentsData::bytesRequired(nsrc->data()->numArgs); + if (!nursery.isInside(nsrc->data())) { + nursery.removeMallocedBufferDuringMinorGC(nsrc->data()); + } else { + AutoEnterOOMUnsafeRegion oomUnsafe; + uint8_t* data = nsrc->zone()->pod_malloc(nDataBytes); + if (!data) { + oomUnsafe.crash( + "Failed to allocate ArgumentsObject data while tenuring."); + } + ndst->initFixedSlot(DATA_SLOT, PrivateValue(data)); + + mozilla::PodCopy(data, reinterpret_cast(nsrc->data()), + nDataBytes); + nbytesTotal += nDataBytes; + } + + AddCellMemory(ndst, nDataBytes, MemoryUse::ArgumentsData); + + if (RareArgumentsData* srcRareData = nsrc->maybeRareData()) { + uint32_t nbytes = RareArgumentsData::bytesRequired(nsrc->initialLength()); + if (!nursery.isInside(srcRareData)) { + nursery.removeMallocedBufferDuringMinorGC(srcRareData); + } else { + AutoEnterOOMUnsafeRegion oomUnsafe; + uint8_t* dstRareData = nsrc->zone()->pod_malloc(nbytes); + if (!dstRareData) { + oomUnsafe.crash( + "Failed to allocate RareArgumentsData data while tenuring."); + } + ndst->data()->rareData = (RareArgumentsData*)dstRareData; + + mozilla::PodCopy(dstRareData, reinterpret_cast(srcRareData), + nbytes); + nbytesTotal += nbytes; + } + + AddCellMemory(ndst, nbytes, MemoryUse::RareArgumentsData); + } + + return nbytesTotal; +} + +/* + * The classes below collaborate to lazily reflect and synchronize actual + * argument values, argument count, and callee function object stored in a + * stack frame with their corresponding property values in the frame's + * arguments 
object. + */ +const JSClassOps MappedArgumentsObject::classOps_ = { + nullptr, // addProperty + ArgumentsObject::obj_delProperty, // delProperty + MappedArgumentsObject::obj_enumerate, // enumerate + nullptr, // newEnumerate + MappedArgumentsObject::obj_resolve, // resolve + ArgumentsObject::obj_mayResolve, // mayResolve + ArgumentsObject::finalize, // finalize + nullptr, // call + nullptr, // construct + ArgumentsObject::trace, // trace +}; + +const js::ClassExtension MappedArgumentsObject::classExt_ = { + ArgumentsObject::objectMoved, // objectMovedOp +}; + +const ObjectOps MappedArgumentsObject::objectOps_ = { + nullptr, // lookupProperty + MappedArgumentsObject::obj_defineProperty, // defineProperty + nullptr, // hasProperty + nullptr, // getProperty + nullptr, // setProperty + nullptr, // getOwnPropertyDescriptor + nullptr, // deleteProperty + nullptr, // getElements + nullptr, // funToString +}; + +const JSClass MappedArgumentsObject::class_ = { + "Arguments", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(MappedArgumentsObject::RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_Object) | + JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE, + &MappedArgumentsObject::classOps_, + nullptr, + &MappedArgumentsObject::classExt_, + &MappedArgumentsObject::objectOps_}; + +/* + * Unmapped arguments is significantly less magical than mapped arguments, so + * it is represented by a different class while sharing some functionality. 
+ */ +const JSClassOps UnmappedArgumentsObject::classOps_ = { + nullptr, // addProperty + ArgumentsObject::obj_delProperty, // delProperty + UnmappedArgumentsObject::obj_enumerate, // enumerate + nullptr, // newEnumerate + UnmappedArgumentsObject::obj_resolve, // resolve + ArgumentsObject::obj_mayResolve, // mayResolve + ArgumentsObject::finalize, // finalize + nullptr, // call + nullptr, // construct + ArgumentsObject::trace, // trace +}; + +const js::ClassExtension UnmappedArgumentsObject::classExt_ = { + ArgumentsObject::objectMoved, // objectMovedOp +}; + +const JSClass UnmappedArgumentsObject::class_ = { + "Arguments", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(UnmappedArgumentsObject::RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_Object) | + JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE, + &UnmappedArgumentsObject::classOps_, nullptr, + &UnmappedArgumentsObject::classExt_}; diff --git a/js/src/vm/ArgumentsObject.h b/js/src/vm/ArgumentsObject.h new file mode 100644 index 0000000000..93ad790f88 --- /dev/null +++ b/js/src/vm/ArgumentsObject.h @@ -0,0 +1,566 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArgumentsObject_h +#define vm_ArgumentsObject_h + +#include "mozilla/MemoryReporting.h" + +#include "gc/Barrier.h" +#include "util/BitArray.h" +#include "vm/NativeObject.h" + +namespace js { + +class AbstractFramePtr; +class ArgumentsObject; +class ScriptFrameIter; + +namespace jit { +class JitFrameLayout; +} // namespace jit + +// RareArgumentsData stores the deleted-elements bits for an arguments object. +// Because |delete arguments[i]| is uncommon, we allocate this data the first +// time an element is deleted. 
+class RareArgumentsData { + // Pointer to an array of bits indicating, for every argument in + // [0, initialLength) whether the element has been deleted. See + // ArgumentsObject::isElementDeleted comment. + size_t deletedBits_[1]; + + RareArgumentsData() = default; + RareArgumentsData(const RareArgumentsData&) = delete; + void operator=(const RareArgumentsData&) = delete; + + public: + static RareArgumentsData* create(JSContext* cx, ArgumentsObject* obj); + static size_t bytesRequired(size_t numActuals); + + bool isElementDeleted(size_t len, size_t i) const { + MOZ_ASSERT(i < len); + return IsBitArrayElementSet(deletedBits_, len, i); + } + void markElementDeleted(size_t len, size_t i) { + MOZ_ASSERT(i < len); + SetBitArrayElement(deletedBits_, len, i); + } +}; + +// ArgumentsData stores the initial indexed arguments provided to a function +// call. It is used to store arguments[i] -- up until the corresponding +// property is modified, when the relevant value is flagged to memorialize the +// modification. +struct ArgumentsData { + /* + * numArgs = std::max(numFormalArgs, numActualArgs) + * The array 'args' has numArgs elements. + */ + uint32_t numArgs; + + RareArgumentsData* rareData; + + /* + * This array holds either the current argument value or the magic + * forwarding value. The latter means that the function has both a + * CallObject and an ArgumentsObject AND the particular formal variable is + * aliased by the CallObject. In such cases, the CallObject holds the + * canonical value so any element access to the arguments object should load + * the value out of the CallObject (which is pointed to by MAYBE_CALL_SLOT). + */ + GCPtr args[1]; + + /* For jit use: */ + static ptrdiff_t offsetOfArgs() { return offsetof(ArgumentsData, args); } + + /* Iterate args. 
*/ + GCPtr* begin() { return args; } + const GCPtr* begin() const { return args; } + GCPtr* end() { return args + numArgs; } + const GCPtr* end() const { return args + numArgs; } + + static size_t bytesRequired(size_t numArgs) { + return offsetof(ArgumentsData, args) + numArgs * sizeof(Value); + } +}; + +// Maximum supported value of arguments.length. This bounds the +// maximum number of arguments that can be supplied to a spread call +// or Function.prototype.apply. This value also bounds the number of +// elements parsed in an array initializer. NB: keep this in sync +// with the copy in builtin/SelfHostingDefines.h. +static const unsigned ARGS_LENGTH_MAX = 500 * 1000; + +// Maximum number of arguments supported in jitcode. This bounds the +// maximum number of arguments that can be supplied to a spread call +// or Function.prototype.apply without entering the VM. We limit the +// number of parameters we can handle to a number that does not risk +// us allocating too much stack, notably on Windows where there is a +// 4K guard page that has to be touched to extend the stack. The value +// "3000" is the size of the guard page minus an arbitrary, but large, +// safety margin. See bug 1351278. +static const uint32_t JIT_ARGS_LENGTH_MAX = 3000 / sizeof(JS::Value); + +static_assert(JIT_ARGS_LENGTH_MAX <= ARGS_LENGTH_MAX, + "maximum jit arguments should be <= maximum arguments"); + +/* + * [SMDOC] ArgumentsObject + * + * ArgumentsObject instances represent |arguments| objects created to store + * function arguments when a function is called. It's expensive to create such + * objects if they're never used, so they're only created when they are + * potentially used. + * + * Arguments objects are complicated because, for non-strict mode code, they + * must alias any named arguments which were provided to the function. 
Gnarly + * example: + * + * function f(a, b, c, d) + * { + * arguments[0] = "seta"; + * assertEq(a, "seta"); + * b = "setb"; + * assertEq(arguments[1], "setb"); + * c = "setc"; + * assertEq(arguments[2], undefined); + * arguments[3] = "setd"; + * assertEq(d, undefined); + * } + * f("arga", "argb"); + * + * ES5's strict mode behaves more sanely, and named arguments don't alias + * elements of an arguments object. + * + * ArgumentsObject instances use the following reserved slots: + * + * INITIAL_LENGTH_SLOT + * Stores the initial value of arguments.length, plus a bit indicating + * whether arguments.length and/or arguments[@@iterator] have been + * modified. Use initialLength(), hasOverriddenLength(), and + * hasOverriddenIterator() to access these values. If arguments.length has + * been modified, then the current value of arguments.length is stored in + * another slot associated with a new property. + * DATA_SLOT + * Stores an ArgumentsData*, described above. + * MAYBE_CALL_SLOT + * Stores the CallObject, if the callee has aliased bindings. See + * the ArgumentsData::args comment. + * CALLEE_SLOT + * Stores the initial arguments.callee. This value can be overridden on + * mapped arguments objects, see hasOverriddenCallee. 
+ */ +class ArgumentsObject : public NativeObject { + public: + static const uint32_t INITIAL_LENGTH_SLOT = 0; + static const uint32_t DATA_SLOT = 1; + static const uint32_t MAYBE_CALL_SLOT = 2; + static const uint32_t CALLEE_SLOT = 3; + + static const uint32_t LENGTH_OVERRIDDEN_BIT = 0x1; + static const uint32_t ITERATOR_OVERRIDDEN_BIT = 0x2; + static const uint32_t ELEMENT_OVERRIDDEN_BIT = 0x4; + static const uint32_t CALLEE_OVERRIDDEN_BIT = 0x8; + static const uint32_t FORWARDED_ARGUMENTS_BIT = 0x10; + static const uint32_t PACKED_BITS_COUNT = 5; + static const uint32_t PACKED_BITS_MASK = (1 << PACKED_BITS_COUNT) - 1; + + static_assert(ARGS_LENGTH_MAX <= (UINT32_MAX >> PACKED_BITS_COUNT), + "Max arguments length must fit in available bits"); + +// Our ability to inline functions that use |arguments| is limited by +// the number of registers available to represent Value operands to +// CreateInlinedArgumentsObject. +#if defined(JS_CODEGEN_X86) + static const uint32_t MaxInlinedArgs = 1; +#else + static const uint32_t MaxInlinedArgs = 3; +#endif + + protected: + template + static ArgumentsObject* create(JSContext* cx, HandleFunction callee, + unsigned numActuals, CopyArgs& copy); + + ArgumentsData* data() const { + return reinterpret_cast( + getFixedSlot(DATA_SLOT).toPrivate()); + } + + RareArgumentsData* maybeRareData() const { return data()->rareData; } + + [[nodiscard]] bool createRareData(JSContext* cx); + + RareArgumentsData* getOrCreateRareData(JSContext* cx) { + if (!data()->rareData && !createRareData(cx)) { + return nullptr; + } + return data()->rareData; + } + + static bool obj_delProperty(JSContext* cx, HandleObject obj, HandleId id, + ObjectOpResult& result); + + static bool obj_mayResolve(const JSAtomState& names, jsid id, JSObject*); + + public: + static const uint32_t RESERVED_SLOTS = 4; + static const gc::AllocKind FINALIZE_KIND = gc::AllocKind::OBJECT4_BACKGROUND; + + /* Create an arguments object for a frame that is expecting them. 
*/ + static ArgumentsObject* createExpected(JSContext* cx, AbstractFramePtr frame); + + /* + * Purposefully disconnect the returned arguments object from the frame + * by always creating a new copy that does not alias formal parameters. + * This allows function-local analysis to determine that formals are + * not aliased and generally simplifies arguments objects. + */ + static ArgumentsObject* createUnexpected(JSContext* cx, + ScriptFrameIter& iter); + static ArgumentsObject* createUnexpected(JSContext* cx, + AbstractFramePtr frame); + + static ArgumentsObject* createForIon(JSContext* cx, + jit::JitFrameLayout* frame, + HandleObject scopeChain); + static ArgumentsObject* createForInlinedIon(JSContext* cx, Value* args, + HandleFunction callee, + HandleObject scopeChain, + uint32_t numActuals); + static ArgumentsObject* createFromValueArray(JSContext* cx, + HandleValueArray argsArray, + HandleFunction callee, + HandleObject scopeChain, + uint32_t numActuals); + + private: + template + static ArgumentsObject* finishPure(JSContext* cx, ArgumentsObject* obj, + JSFunction* callee, JSObject* callObj, + unsigned numActuals, CopyArgs& copy); + + public: + /* + * Allocate ArgumentsData and fill reserved slots after allocating an + * ArgumentsObject in Ion code. + */ + static ArgumentsObject* finishForIonPure(JSContext* cx, + jit::JitFrameLayout* frame, + JSObject* scopeChain, + ArgumentsObject* obj); + + /* + * Allocate ArgumentsData for inlined arguments and fill reserved slots after + * allocating an ArgumentsObject in Ion code. + */ + static ArgumentsObject* finishInlineForIonPure( + JSContext* cx, JSObject* rawCallObj, JSFunction* rawCallee, Value* args, + uint32_t numActuals, ArgumentsObject* obj); + + static ArgumentsObject* createTemplateObject(JSContext* cx, bool mapped); + + /* + * Return the initial length of the arguments. This may differ from the + * current value of arguments.length! 
+ */ + uint32_t initialLength() const { + uint32_t argc = uint32_t(getFixedSlot(INITIAL_LENGTH_SLOT).toInt32()) >> + PACKED_BITS_COUNT; + MOZ_ASSERT(argc <= ARGS_LENGTH_MAX); + return argc; + } + + // True iff arguments.length has been assigned or deleted. + bool hasOverriddenLength() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & LENGTH_OVERRIDDEN_BIT; + } + + void markLengthOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | LENGTH_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + // Create the default "length" property and set LENGTH_OVERRIDDEN_BIT. + static bool reifyLength(JSContext* cx, Handle obj); + + // True iff arguments[@@iterator] has been assigned or deleted. + bool hasOverriddenIterator() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & ITERATOR_OVERRIDDEN_BIT; + } + + void markIteratorOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ITERATOR_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + // Create the default @@iterator property and set ITERATOR_OVERRIDDEN_BIT. + static bool reifyIterator(JSContext* cx, Handle obj); + + /* + * Return the arguments iterator function. + */ + static bool getArgumentsIterator(JSContext* cx, MutableHandleValue val); + + // True iff any element has been assigned or deleted. + bool hasOverriddenElement() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & ELEMENT_OVERRIDDEN_BIT; + } + + void markElementOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ELEMENT_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + private: + /* + * Because the arguments object is a real object, its elements may be + * deleted. This is implemented by setting a 'deleted' flag for the arg + * which is read by argument object resolve and getter/setter hooks. 
+ * + * NB: an element, once deleted, stays deleted. Thus: + * + * function f(x) { delete arguments[0]; arguments[0] = 42; return x } + * assertEq(f(1), 1); + * + * This works because, once a property is deleted from an arguments object, + * it gets regular properties with regular getters/setters that don't alias + * ArgumentsData::args. + */ + bool isElementDeleted(uint32_t i) const { + MOZ_ASSERT(i < data()->numArgs); + if (i >= initialLength()) { + return false; + } + bool result = maybeRareData() && + maybeRareData()->isElementDeleted(initialLength(), i); + MOZ_ASSERT_IF(result, hasOverriddenElement()); + return result; + } + + protected: + bool markElementDeleted(JSContext* cx, uint32_t i); + + public: + /* + * Return true iff the index is a valid element index for this arguments + * object. + * + * Returning true here doesn't imply that the element value can be read + * through |ArgumentsObject::element()|. For example unmapped arguments + * objects can have an element index property redefined without having marked + * the element as deleted. Instead use |maybeGetElement()| or manually check + * for |hasOverriddenElement()|. + */ + bool isElement(uint32_t i) const { + return i < initialLength() && !isElementDeleted(i); + } + + /* + * An ArgumentsObject serves two roles: + * - a real object, accessed through regular object operations, e.g.., + * GetElement corresponding to 'arguments[i]'; + * - a VM-internal data structure, storing the value of arguments (formal + * and actual) that are accessed directly by the VM when a reading the + * value of a formal parameter. 
+ * There are two ways to access the ArgumentsData::args corresponding to + * these two use cases: + * - object access should use elements(i) which will take care of + * forwarding when the value is the magic forwarding value; + * - VM argument access should use arg(i) which will assert that the + * value is not the magic forwarding value (since, if such forwarding was + * needed, the frontend should have emitted JSOp::GetAliasedVar). + */ + const Value& element(uint32_t i) const; + + inline void setElement(uint32_t i, const Value& v); + + const Value& arg(unsigned i) const { + MOZ_ASSERT(i < data()->numArgs); + const Value& v = data()->args[i]; + MOZ_ASSERT(!v.isMagic()); + return v; + } + + void setArg(unsigned i, const Value& v) { + MOZ_ASSERT(i < data()->numArgs); + GCPtr& lhs = data()->args[i]; + MOZ_ASSERT(!lhs.isMagic()); + lhs = v; + } + + /* + * Test if an argument is forwarded, i.e. its actual value is stored in the + * CallObject and can't be directly read from |ArgumentsData::args|. + */ + bool argIsForwarded(unsigned i) const { + MOZ_ASSERT(i < data()->numArgs); + const Value& v = data()->args[i]; + MOZ_ASSERT_IF(IsMagicScopeSlotValue(v), anyArgIsForwarded()); + return IsMagicScopeSlotValue(v); + } + + bool anyArgIsForwarded() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & FORWARDED_ARGUMENTS_BIT; + } + + void markArgumentForwarded() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | FORWARDED_ARGUMENTS_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + /* + * Attempt to speedily and efficiently access the i-th element of this + * arguments object. Return true if the element was speedily returned. + * Return false if the element must be looked up more slowly using + * getProperty or some similar method. The second overload copies the + * elements [start, start + count) into the locations starting at 'vp'. + * + * NB: Returning false does not indicate error! 
+ */ + bool maybeGetElement(uint32_t i, MutableHandleValue vp) { + if (i >= initialLength() || hasOverriddenElement()) { + return false; + } + vp.set(element(i)); + return true; + } + + inline bool maybeGetElements(uint32_t start, uint32_t count, js::Value* vp); + + /* + * Measures things hanging off this ArgumentsObject that are counted by the + * |miscSize| argument in JSObject::sizeOfExcludingThis(). + */ + size_t sizeOfMisc(mozilla::MallocSizeOf mallocSizeOf) const { + if (!data()) { // Template arguments objects have no data. + return 0; + } + return mallocSizeOf(data()) + mallocSizeOf(maybeRareData()); + } + size_t sizeOfData() const { + return ArgumentsData::bytesRequired(data()->numArgs) + + (maybeRareData() ? RareArgumentsData::bytesRequired(initialLength()) + : 0); + } + + static void finalize(JS::GCContext* gcx, JSObject* obj); + static void trace(JSTracer* trc, JSObject* obj); + static size_t objectMoved(JSObject* dst, JSObject* src); + + /* For jit use: */ + static size_t getDataSlotOffset() { return getFixedSlotOffset(DATA_SLOT); } + static size_t getInitialLengthSlotOffset() { + return getFixedSlotOffset(INITIAL_LENGTH_SLOT); + } + + static Value MagicEnvSlotValue(uint32_t slot) { + // When forwarding slots to a backing CallObject, the slot numbers are + // stored as uint32 magic values. This raises an ambiguity if we have + // also copied JS_OPTIMIZED_OUT magic from a JIT frame or + // JS_UNINITIALIZED_LEXICAL magic on the CallObject. To distinguish + // normal magic values (those with a JSWhyMagic) and uint32 magic + // values, we add the maximum JSWhyMagic value to the slot + // number. This is safe as ARGS_LENGTH_MAX is well below UINT32_MAX. 
+ static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX); + return JS::MagicValueUint32(slot + JS_WHY_MAGIC_COUNT); + } + static uint32_t SlotFromMagicScopeSlotValue(const Value& v) { + static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX); + return v.magicUint32() - JS_WHY_MAGIC_COUNT; + } + static bool IsMagicScopeSlotValue(const Value& v) { + return v.isMagic() && v.magicUint32() > JS_WHY_MAGIC_COUNT; + } + + static void MaybeForwardToCallObject(AbstractFramePtr frame, + ArgumentsObject* obj, + ArgumentsData* data); + static void MaybeForwardToCallObject(JSFunction* callee, JSObject* callObj, + ArgumentsObject* obj, + ArgumentsData* data); +}; + +class MappedArgumentsObject : public ArgumentsObject { + static const JSClassOps classOps_; + static const ClassExtension classExt_; + static const ObjectOps objectOps_; + + public: + static const JSClass class_; + + JSFunction& callee() const { + return getFixedSlot(CALLEE_SLOT).toObject().as(); + } + + bool hasOverriddenCallee() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & CALLEE_OVERRIDDEN_BIT; + } + + void markCalleeOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | CALLEE_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + static size_t getCalleeSlotOffset() { + return getFixedSlotOffset(CALLEE_SLOT); + } + + // Create the default "callee" property and set CALLEE_OVERRIDDEN_BIT. 
+ static bool reifyCallee(JSContext* cx, Handle obj); + + private: + static bool obj_enumerate(JSContext* cx, HandleObject obj); + static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id, + bool* resolvedp); + static bool obj_defineProperty(JSContext* cx, HandleObject obj, HandleId id, + Handle desc, + ObjectOpResult& result); +}; + +class UnmappedArgumentsObject : public ArgumentsObject { + static const JSClassOps classOps_; + static const ClassExtension classExt_; + + public: + static const JSClass class_; + + private: + static bool obj_enumerate(JSContext* cx, HandleObject obj); + static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id, + bool* resolvedp); +}; + +extern bool MappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp); + +extern bool MappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result); + +extern bool UnmappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp); + +extern bool UnmappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result); + +} // namespace js + +template <> +inline bool JSObject::is() const { + return is() || is(); +} + +#endif /* vm_ArgumentsObject_h */ diff --git a/js/src/vm/ArrayBufferObject-inl.h b/js/src/vm/ArrayBufferObject-inl.h new file mode 100644 index 0000000000..1ca36c243d --- /dev/null +++ b/js/src/vm/ArrayBufferObject-inl.h @@ -0,0 +1,57 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayBufferObject_inl_h +#define vm_ArrayBufferObject_inl_h + +// Utilities and common inline code for ArrayBufferObject and +// SharedArrayBufferObject. 
+ +#include "vm/ArrayBufferObject.h" + +#include "vm/SharedArrayObject.h" +#include "vm/SharedMem.h" + +namespace js { + +inline SharedMem ArrayBufferObjectMaybeShared::dataPointerEither() { + if (this->is()) { + return this->as().dataPointerShared(); + } + return this->as().dataPointerShared(); +} + +inline bool ArrayBufferObjectMaybeShared::isDetached() const { + if (this->is()) { + return this->as().isDetached(); + } + return false; +} + +inline size_t ArrayBufferObjectMaybeShared::byteLength() const { + if (this->is()) { + return this->as().byteLength(); + } + return this->as().byteLength(); +} + +inline bool ArrayBufferObjectMaybeShared::isPreparedForAsmJS() const { + if (this->is()) { + return this->as().isPreparedForAsmJS(); + } + return false; +} + +inline bool ArrayBufferObjectMaybeShared::isWasm() const { + if (this->is()) { + return this->as().isWasm(); + } + return this->as().isWasm(); +} + +} // namespace js + +#endif // vm_ArrayBufferObject_inl_h diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp new file mode 100644 index 0000000000..ec7801a31d --- /dev/null +++ b/js/src/vm/ArrayBufferObject.cpp @@ -0,0 +1,2204 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/ArrayBufferObject-inl.h" +#include "vm/ArrayBufferObject.h" + +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/DebugOnly.h" +#include "mozilla/Likely.h" +#include "mozilla/Maybe.h" +#include "mozilla/ScopeExit.h" +#include "mozilla/TaggedAnonymousMemory.h" + +#include // std::max, std::min +#include // std::uninitialized_copy_n +#include +#if !defined(XP_WIN) && !defined(__wasi__) +# include +#endif +#include // std::tuple +#include +#ifdef MOZ_VALGRIND +# include +#endif + +#include "jsnum.h" +#include "jstypes.h" + +#include "gc/Barrier.h" +#include "gc/Memory.h" +#include "js/ArrayBuffer.h" +#include "js/Conversions.h" +#include "js/experimental/TypedData.h" // JS_IsArrayBufferViewObject +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/MemoryMetrics.h" +#include "js/PropertySpec.h" +#include "js/SharedArrayBuffer.h" +#include "js/Wrapper.h" +#include "util/WindowsWrapper.h" +#include "vm/GlobalObject.h" +#include "vm/JSContext.h" +#include "vm/JSObject.h" +#include "vm/SharedArrayObject.h" +#include "vm/Warnings.h" // js::WarnNumberASCII +#include "wasm/WasmConstants.h" +#include "wasm/WasmLog.h" +#include "wasm/WasmMemory.h" +#include "wasm/WasmModuleTypes.h" +#include "wasm/WasmProcess.h" + +#include "gc/GCContext-inl.h" +#include "gc/Marking-inl.h" +#include "vm/NativeObject-inl.h" +#include "vm/Realm-inl.h" // js::AutoRealm + +using JS::ToInt32; + +using js::wasm::IndexType; +using js::wasm::Pages; +using mozilla::Atomic; +using mozilla::CheckedInt; +using mozilla::DebugOnly; +using mozilla::Maybe; +using mozilla::Nothing; +using mozilla::Some; + +using namespace js; + +// Wasm allows large amounts of memory to be reserved at a time. On 64-bit +// platforms (with "huge memories") we reserve around 4GB of virtual address +// space for every wasm memory; on 32-bit platforms we usually do not, but users +// often initialize memories in the hundreds of megabytes. 
+// +// If too many wasm memories remain live, we run up against system resource +// exhaustion (address space or number of memory map descriptors) - see bug +// 1068684, bug 1073934, bug 1517412, bug 1502733 for details. The limiting case +// seems to be Android on ARM64, where the per-process address space is limited +// to 4TB (39 bits) by the organization of the page tables. An earlier problem +// was Windows Vista Home 64-bit, where the per-process address space is limited +// to 8TB (40 bits). And 32-bit platforms only have 4GB of address space anyway. +// +// Thus we track the amount of memory reserved for wasm, and set a limit per +// process. We trigger GC work when we approach the limit and we throw an OOM +// error if the per-process limit is exceeded. The limit (WasmReservedBytesMax) +// is specific to architecture, OS, and OS configuration. +// +// Since the WasmReservedBytesMax limit is not generally accounted for by +// any existing GC-trigger heuristics, we need an extra heuristic for triggering +// GCs when the caller is allocating memories rapidly without other garbage +// (e.g. bug 1773225). Thus, once the reserved memory crosses the threshold +// WasmReservedBytesStartTriggering, we start triggering GCs every +// WasmReservedBytesPerTrigger bytes. Once we reach +// WasmReservedBytesStartSyncFullGC bytes reserved, we perform expensive +// non-incremental full GCs as a last-ditch effort to avoid unnecessary failure. +// Once we reach WasmReservedBytesMax, we perform further full GCs before giving +// up. +// +// (History: The original implementation only tracked the number of "huge +// memories" allocated by WASM, but this was found to be insufficient because +// 32-bit platforms have similar resource exhaustion issues. We now track +// reserved bytes directly.) +// +// (We also used to reserve significantly more than 4GB for huge memories, but +// this was reduced in bug 1442544.) 
+ +// ASAN and TSAN use a ton of vmem for bookkeeping leaving a lot less for the +// program so use a lower limit. +#if defined(MOZ_TSAN) || defined(MOZ_ASAN) +static const uint64_t WasmMemAsanOverhead = 2; +#else +static const uint64_t WasmMemAsanOverhead = 1; +#endif + +// WasmReservedStartTriggering + WasmReservedPerTrigger must be well below +// WasmReservedStartSyncFullGC in order to provide enough time for incremental +// GC to do its job. + +#if defined(JS_CODEGEN_ARM64) && defined(ANDROID) + +static const uint64_t WasmReservedBytesMax = + 75 * wasm::HugeMappedSize / WasmMemAsanOverhead; +static const uint64_t WasmReservedBytesStartTriggering = + 15 * wasm::HugeMappedSize; +static const uint64_t WasmReservedBytesStartSyncFullGC = + WasmReservedBytesMax - 15 * wasm::HugeMappedSize; +static const uint64_t WasmReservedBytesPerTrigger = 15 * wasm::HugeMappedSize; + +#elif defined(WASM_SUPPORTS_HUGE_MEMORY) + +static const uint64_t WasmReservedBytesMax = + 1000 * wasm::HugeMappedSize / WasmMemAsanOverhead; +static const uint64_t WasmReservedBytesStartTriggering = + 100 * wasm::HugeMappedSize; +static const uint64_t WasmReservedBytesStartSyncFullGC = + WasmReservedBytesMax - 100 * wasm::HugeMappedSize; +static const uint64_t WasmReservedBytesPerTrigger = 100 * wasm::HugeMappedSize; + +#else // 32-bit (and weird 64-bit platforms without huge memory) + +static const uint64_t GiB = 1024 * 1024 * 1024; + +static const uint64_t WasmReservedBytesMax = + (4 * GiB) / 2 / WasmMemAsanOverhead; +static const uint64_t WasmReservedBytesStartTriggering = (4 * GiB) / 8; +static const uint64_t WasmReservedBytesStartSyncFullGC = + WasmReservedBytesMax - (4 * GiB) / 8; +static const uint64_t WasmReservedBytesPerTrigger = (4 * GiB) / 8; + +#endif + +// The total number of bytes reserved for wasm memories. +static Atomic wasmReservedBytes(0); +// The number of bytes of wasm memory reserved since the last GC trigger. 
+static Atomic wasmReservedBytesSinceLast(0); + +uint64_t js::WasmReservedBytes() { return wasmReservedBytes; } + +[[nodiscard]] static bool CheckArrayBufferTooLarge(JSContext* cx, + uint64_t nbytes) { + // Refuse to allocate too large buffers. + if (MOZ_UNLIKELY(nbytes > ArrayBufferObject::MaxByteLength)) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BAD_ARRAY_LENGTH); + return false; + } + + return true; +} + +void* js::MapBufferMemory(wasm::IndexType t, size_t mappedSize, + size_t initialCommittedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(initialCommittedSize <= mappedSize); + + auto failed = mozilla::MakeScopeExit( + [&] { wasmReservedBytes -= uint64_t(mappedSize); }); + wasmReservedBytes += uint64_t(mappedSize); + + // Test >= to guard against the case where multiple extant runtimes + // race to allocate. + if (wasmReservedBytes >= WasmReservedBytesMax) { + if (OnLargeAllocationFailure) { + OnLargeAllocationFailure(); + } + if (wasmReservedBytes >= WasmReservedBytesMax) { + return nullptr; + } + } + +#ifdef XP_WIN + void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS); + if (!data) { + return nullptr; + } + + if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) { + VirtualFree(data, 0, MEM_RELEASE); + return nullptr; + } +#elif defined(__wasi__) + void* data = nullptr; + if (int err = posix_memalign(&data, gc::SystemPageSize(), mappedSize)) { + MOZ_ASSERT(err == ENOMEM); + return nullptr; + } + MOZ_ASSERT(data); + memset(data, 0, mappedSize); +#else // !XP_WIN && !__wasi__ + void* data = + MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE, + MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved"); + if (data == MAP_FAILED) { + return nullptr; + } + + // Note we will waste a page on zero-sized memories here + if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) { + munmap(data, mappedSize); + 
return nullptr; + } +#endif // !XP_WIN && !__wasi__ + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE( + (unsigned char*)data + initialCommittedSize, + mappedSize - initialCommittedSize); +#endif + + failed.release(); + return data; +} + +bool js::CommitBufferMemory(void* dataEnd, size_t delta) { + MOZ_ASSERT(delta); + MOZ_ASSERT(delta % gc::SystemPageSize() == 0); + +#ifdef XP_WIN + if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) { + return false; + } +#elif defined(__wasi__) + // posix_memalign'd memory is already committed + return true; +#else + if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) { + return false; + } +#endif // XP_WIN + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta); +#endif + + return true; +} + +bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize, + size_t newMappedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(newMappedSize >= mappedSize); + +#ifdef XP_WIN + void* mappedEnd = (char*)dataPointer + mappedSize; + uint32_t delta = newMappedSize - mappedSize; + if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) { + return false; + } + return true; +#elif defined(__wasi__) + return false; +#elif defined(XP_LINUX) + // Note this will not move memory (no MREMAP_MAYMOVE specified) + if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) { + return false; + } + return true; +#else + // No mechanism for remapping on MacOS and other Unices. Luckily + // shouldn't need it here as most of these are 64-bit. 
+ return false; +#endif +} + +void js::UnmapBufferMemory(wasm::IndexType t, void* base, size_t mappedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + +#ifdef XP_WIN + VirtualFree(base, 0, MEM_RELEASE); +#elif defined(__wasi__) + free(base); +#else + munmap(base, mappedSize); +#endif // XP_WIN + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base, + mappedSize); +#endif + + // Untrack reserved memory *after* releasing memory -- otherwise, a race + // condition could enable the creation of unlimited buffers. + wasmReservedBytes -= uint64_t(mappedSize); +} + +/* + * ArrayBufferObject + * + * This class holds the underlying raw buffer that the TypedArrayObject classes + * access. It can be created explicitly and passed to a TypedArrayObject, or + * can be created implicitly by constructing a TypedArrayObject with a size. + */ + +/* + * ArrayBufferObject (base) + */ + +static const JSClassOps ArrayBufferObjectClassOps = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + ArrayBufferObject::finalize, // finalize + nullptr, // call + nullptr, // construct + nullptr, // trace +}; + +static const JSFunctionSpec arraybuffer_functions[] = { + JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0), JS_FS_END}; + +static const JSPropertySpec arraybuffer_properties[] = { + JS_SELF_HOSTED_SYM_GET(species, "$ArrayBufferSpecies", 0), JS_PS_END}; + +static const JSFunctionSpec arraybuffer_proto_functions[] = { + JS_SELF_HOSTED_FN("slice", "ArrayBufferSlice", 2, 0), JS_FS_END}; + +static const JSPropertySpec arraybuffer_proto_properties[] = { + JS_PSG("byteLength", ArrayBufferObject::byteLengthGetter, 0), + JS_STRING_SYM_PS(toStringTag, "ArrayBuffer", JSPROP_READONLY), JS_PS_END}; + +static const ClassSpec ArrayBufferObjectClassSpec = { + GenericCreateConstructor, + 
GenericCreatePrototype, + arraybuffer_functions, + arraybuffer_properties, + arraybuffer_proto_functions, + arraybuffer_proto_properties}; + +static const ClassExtension ArrayBufferObjectClassExtension = { + ArrayBufferObject::objectMoved, // objectMovedOp +}; + +const JSClass ArrayBufferObject::class_ = { + "ArrayBuffer", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) | + JSCLASS_BACKGROUND_FINALIZE, + &ArrayBufferObjectClassOps, &ArrayBufferObjectClassSpec, + &ArrayBufferObjectClassExtension}; + +const JSClass ArrayBufferObject::protoClass_ = { + "ArrayBuffer.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer), + JS_NULL_CLASS_OPS, &ArrayBufferObjectClassSpec}; + +static bool IsArrayBuffer(HandleValue v) { + return v.isObject() && v.toObject().is(); +} + +MOZ_ALWAYS_INLINE bool ArrayBufferObject::byteLengthGetterImpl( + JSContext* cx, const CallArgs& args) { + MOZ_ASSERT(IsArrayBuffer(args.thisv())); + auto* buffer = &args.thisv().toObject().as(); + args.rval().setNumber(buffer->byteLength()); + return true; +} + +bool ArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return CallNonGenericMethod(cx, args); +} + +/* + * ArrayBuffer.isView(obj); ES6 (Dec 2013 draft) 24.1.3.1 + */ +bool ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + args.rval().setBoolean(args.get(0).isObject() && + JS_IsArrayBufferViewObject(&args.get(0).toObject())); + return true; +} + +// ES2017 draft 24.1.2.1 +bool ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 1. + if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer")) { + return false; + } + + // Step 2. 
+ uint64_t byteLength; + if (!ToIndex(cx, args.get(0), &byteLength)) { + return false; + } + + // Step 3 (Inlined 24.1.1.1 AllocateArrayBuffer). + // 24.1.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor). + RootedObject proto(cx); + if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_ArrayBuffer, + &proto)) { + return false; + } + + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). + if (!CheckArrayBufferTooLarge(cx, byteLength)) { + return false; + } + + // 24.1.1.1, steps 1 and 4-6. + JSObject* bufobj = createZeroed(cx, byteLength, proto); + if (!bufobj) { + return false; + } + args.rval().setObject(*bufobj); + return true; +} + +using ArrayBufferContents = UniquePtr; + +static ArrayBufferContents AllocateUninitializedArrayBufferContents( + JSContext* cx, size_t nbytes) { + // First attempt a normal allocation. + uint8_t* p = + cx->maybe_pod_arena_malloc(js::ArrayBufferContentsArena, nbytes); + if (MOZ_UNLIKELY(!p)) { + // Otherwise attempt a large allocation, calling the + // large-allocation-failure callback if necessary. + p = static_cast(cx->runtime()->onOutOfMemoryCanGC( + js::AllocFunction::Malloc, js::ArrayBufferContentsArena, nbytes)); + if (!p) { + ReportOutOfMemory(cx); + } + } + + return ArrayBufferContents(p); +} + +static ArrayBufferContents AllocateArrayBufferContents(JSContext* cx, + size_t nbytes) { + // First attempt a normal allocation. + uint8_t* p = + cx->maybe_pod_arena_calloc(js::ArrayBufferContentsArena, nbytes); + if (MOZ_UNLIKELY(!p)) { + // Otherwise attempt a large allocation, calling the + // large-allocation-failure callback if necessary. 
+ p = static_cast(cx->runtime()->onOutOfMemoryCanGC( + js::AllocFunction::Calloc, js::ArrayBufferContentsArena, nbytes)); + if (!p) { + ReportOutOfMemory(cx); + } + } + + return ArrayBufferContents(p); +} + +static ArrayBufferContents NewCopiedBufferContents( + JSContext* cx, Handle buffer) { + ArrayBufferContents dataCopy = + AllocateUninitializedArrayBufferContents(cx, buffer->byteLength()); + if (dataCopy) { + if (auto count = buffer->byteLength()) { + memcpy(dataCopy.get(), buffer->dataPointer(), count); + } + } + return dataCopy; +} + +/* static */ +void ArrayBufferObject::detach(JSContext* cx, + Handle buffer) { + cx->check(buffer); + MOZ_ASSERT(!buffer->isPreparedForAsmJS()); + + // Update all views of the buffer to account for the buffer having been + // detached, and clear the buffer's data and list of views. + // + // Typed object buffers are not exposed and cannot be detached. + + auto& innerViews = ObjectRealm::get(buffer).innerViews.get(); + if (InnerViewTable::ViewVector* views = + innerViews.maybeViewsUnbarriered(buffer)) { + for (size_t i = 0; i < views->length(); i++) { + JSObject* view = (*views)[i]; + view->as().notifyBufferDetached(); + } + innerViews.removeViews(buffer); + } + if (JSObject* view = buffer->firstView()) { + view->as().notifyBufferDetached(); + buffer->setFirstView(nullptr); + } + + if (buffer->dataPointer()) { + buffer->releaseData(cx->gcContext()); + buffer->setDataPointer(BufferContents::createNoData()); + } + + buffer->setByteLength(0); + buffer->setIsDetached(); +} + +/* clang-format off */ +/* + * [SMDOC] WASM Linear Memory structure + * + * Wasm Raw Buf Linear Memory Structure + * + * The linear heap in Wasm is an mmaped array buffer. Several constants manage + * its lifetime: + * + * - byteLength - the wasm-visible current length of the buffer in + * bytes. Accesses in the range [0, byteLength] succeed. May only increase. + * + * - boundsCheckLimit - the size against which we perform bounds checks. 
The + * value of this depends on the bounds checking strategy chosen for the array + * buffer and the specific bounds checking semantics. For asm.js code and + * for wasm code running with explicit bounds checking, it is the always the + * same as the byteLength. For wasm code using the huge-memory trick, it is + * always wasm::GuardSize smaller than mappedSize. + * + * See also "Linear memory addresses and bounds checking" in + * wasm/WasmMemory.cpp. + * + * See also WasmMemoryObject::boundsCheckLimit(). + * + * - sourceMaxSize - the optional declared limit on how far byteLength can grow + * in pages. This is the unmodified maximum size from the source module or + * JS-API invocation. This may not be representable in byte lengths, nor + * feasible for a module to actually grow to due to implementation limits. + * It is used for correct linking checks and js-types reflection. + * + * - clampedMaxSize - the maximum size on how far the byteLength can grow in + * pages. This value respects implementation limits and is always + * representable as a byte length. Every memory has a clampedMaxSize, even if + * no maximum was specified in source. When a memory has no sourceMaxSize, + * the clampedMaxSize will be the maximum amount of memory that can be grown + * to while still respecting implementation limits. + * + * - mappedSize - the actual mmapped size. Access in the range [0, mappedSize] + * will either succeed, or be handled by the wasm signal handlers. If + * sourceMaxSize is present at initialization, then we attempt to map the + * whole clampedMaxSize. Otherwise we only map the region needed for the + * initial size. + * + * The below diagram shows the layout of the wasm heap. The wasm-visible portion + * of the heap starts at 0. There is one extra page prior to the start of the + * wasm heap which contains the WasmArrayRawBuffer struct at its end (i.e. right + * before the start of the WASM heap). 
+ * + * WasmArrayRawBuffer + * \ ArrayBufferObject::dataPointer() + * \ / + * \ | + * ______|_|______________________________________________________ + * |______|_|______________|___________________|___________________| + * 0 byteLength clampedMaxSize mappedSize + * + * \_______________________/ + * COMMITED + * \_____________________________________/ + * SLOP + * \______________________________________________________________/ + * MAPPED + * + * Invariants on byteLength, clampedMaxSize, and mappedSize: + * - byteLength only increases + * - 0 <= byteLength <= clampedMaxSize <= mappedSize + * - if sourceMaxSize is not specified, mappedSize may grow. + * It is otherwise constant. + * - initialLength <= clampedMaxSize <= sourceMaxSize (if present) + * - clampedMaxSize <= wasm::MaxMemoryPages() + * + * Invariants on boundsCheckLimit: + * - for wasm code with the huge-memory trick, + * clampedMaxSize <= boundsCheckLimit <= mappedSize + * - for asm.js code or wasm with explicit bounds checking, + * byteLength == boundsCheckLimit <= clampedMaxSize + * - on ARM, boundsCheckLimit must be a valid ARM immediate. + * - if sourceMaxSize is not specified, boundsCheckLimit may grow as + * mappedSize grows. They are otherwise constant. + + * NOTE: For asm.js on 32-bit platforms and on all platforms when running with + * explicit bounds checking, we guarantee that + * + * byteLength == maxSize == boundsCheckLimit == mappedSize + * + * That is, signal handlers will not be invoked. + * + * The region between byteLength and mappedSize is the SLOP - an area where we use + * signal handlers to catch things that slip by bounds checks. Logically it has + * two parts: + * + * - from byteLength to boundsCheckLimit - this part of the SLOP serves to catch + * accesses to memory we have reserved but not yet grown into. This allows us + * to grow memory up to max (when present) without having to patch/update the + * bounds checks. 
+ * + * - from boundsCheckLimit to mappedSize - this part of the SLOP allows us to + * bounds check against base pointers and fold some constant offsets inside + * loads. This enables better Bounds Check Elimination. See "Linear memory + * addresses and bounds checking" in wasm/WasmMemory.cpp. + * + */ +/* clang-format on */ + +[[nodiscard]] bool WasmArrayRawBuffer::growToPagesInPlace(Pages newPages) { + size_t newSize = newPages.byteLength(); + size_t oldSize = byteLength(); + + MOZ_ASSERT(newSize >= oldSize); + MOZ_ASSERT(newPages <= clampedMaxPages()); + MOZ_ASSERT(newSize <= mappedSize()); + + size_t delta = newSize - oldSize; + MOZ_ASSERT(delta % wasm::PageSize == 0); + + uint8_t* dataEnd = dataPointer() + oldSize; + MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0); + + if (delta && !CommitBufferMemory(dataEnd, delta)) { + return false; + } + + length_ = newSize; + + return true; +} + +bool WasmArrayRawBuffer::extendMappedSize(Pages maxPages) { + size_t newMappedSize = wasm::ComputeMappedSize(maxPages); + MOZ_ASSERT(mappedSize_ <= newMappedSize); + if (mappedSize_ == newMappedSize) { + return true; + } + + if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) { + return false; + } + + mappedSize_ = newMappedSize; + return true; +} + +void WasmArrayRawBuffer::tryGrowMaxPagesInPlace(Pages deltaMaxPages) { + Pages newMaxPages = clampedMaxPages_; + + DebugOnly valid = newMaxPages.checkedIncrement(deltaMaxPages); + // Caller must ensure increment does not overflow or increase over the + // specified maximum pages. + MOZ_ASSERT(valid); + MOZ_ASSERT_IF(sourceMaxPages_.isSome(), newMaxPages <= *sourceMaxPages_); + + if (!extendMappedSize(newMaxPages)) { + return; + } + clampedMaxPages_ = newMaxPages; +} + +void WasmArrayRawBuffer::discard(size_t byteOffset, size_t byteLen) { + uint8_t* memBase = dataPointer(); + + // The caller is responsible for ensuring these conditions are met; see this + // function's comment in ArrayBufferObject.h. 
+ MOZ_ASSERT(byteOffset % wasm::PageSize == 0); + MOZ_ASSERT(byteLen % wasm::PageSize == 0); + MOZ_ASSERT(wasm::MemoryBoundsCheck(uint64_t(byteOffset), uint64_t(byteLen), + byteLength())); + + // Discarding zero bytes "succeeds" with no effect. + if (byteLen == 0) { + return; + } + + void* addr = memBase + uintptr_t(byteOffset); + + // On POSIX-ish platforms, we discard memory by overwriting previously-mapped + // pages with freshly-mapped pages (which are all zeroed). The operating + // system recognizes this and decreases the process RSS, and eventually + // collects the abandoned physical pages. + // + // On Windows, committing over previously-committed pages has no effect, and + // the memory must be explicitly decommitted first. This is not the same as an + // munmap; the address space is still reserved. + +#ifdef XP_WIN + if (!VirtualFree(addr, byteLen, MEM_DECOMMIT)) { + MOZ_CRASH("wasm discard: failed to decommit memory"); + } + if (!VirtualAlloc(addr, byteLen, MEM_COMMIT, PAGE_READWRITE)) { + MOZ_CRASH("wasm discard: decommitted memory but failed to recommit"); + }; +#elif defined(__wasi__) + memset(addr, 0, byteLen); +#else // !XP_WIN + void* data = MozTaggedAnonymousMmap(addr, byteLen, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANON | MAP_FIXED, -1, 0, + "wasm-reserved"); + if (data == MAP_FAILED) { + MOZ_CRASH("failed to discard wasm memory; memory mappings may be broken"); + } +#endif +} + +/* static */ +WasmArrayRawBuffer* WasmArrayRawBuffer::AllocateWasm( + IndexType indexType, Pages initialPages, Pages clampedMaxPages, + const Maybe& sourceMaxPages, const Maybe& mapped) { + // Prior code has asserted that initial pages is within our implementation + // limits (wasm::MaxMemoryPages) and we can assume it is a valid size_t. + MOZ_ASSERT(initialPages.hasByteLength()); + size_t numBytes = initialPages.byteLength(); + + // If there is a specified maximum, attempt to map the whole range for + // clampedMaxPages. 
Or else map only what's required for initialPages. + Pages initialMappedPages = + sourceMaxPages.isSome() ? clampedMaxPages : initialPages; + + // Use an override mapped size, or else compute the mapped size from + // initialMappedPages. + size_t mappedSize = + mapped.isSome() ? *mapped : wasm::ComputeMappedSize(initialMappedPages); + + MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize()); + MOZ_RELEASE_ASSERT(numBytes <= SIZE_MAX - gc::SystemPageSize()); + MOZ_RELEASE_ASSERT(initialPages <= clampedMaxPages); + MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0); + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + + uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize(); + uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize(); + + void* data = MapBufferMemory(indexType, (size_t)mappedSizeWithHeader, + (size_t)numBytesWithHeader); + if (!data) { + return nullptr; + } + + uint8_t* base = reinterpret_cast(data) + gc::SystemPageSize(); + uint8_t* header = base - sizeof(WasmArrayRawBuffer); + + auto rawBuf = new (header) WasmArrayRawBuffer( + indexType, base, clampedMaxPages, sourceMaxPages, mappedSize, numBytes); + return rawBuf; +} + +/* static */ +void WasmArrayRawBuffer::Release(void* mem) { + WasmArrayRawBuffer* header = + (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer)); + + MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize()); + size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize(); + + static_assert(std::is_trivially_destructible_v, + "no need to call the destructor"); + + UnmapBufferMemory(header->indexType(), header->basePointer(), + mappedSizeWithHeader); +} + +WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const { + MOZ_RELEASE_ASSERT(kind_ == WASM); + return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer)); +} + +template +static bool CreateSpecificWasmBuffer( + JSContext* cx, const wasm::MemoryDesc& memory, + 
MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) { + bool useHugeMemory = wasm::IsHugeMemoryEnabled(memory.indexType()); + Pages initialPages = memory.initialPages(); + Maybe sourceMaxPages = memory.maximumPages(); + Pages clampedMaxPages = wasm::ClampedMaxPages( + memory.indexType(), initialPages, sourceMaxPages, useHugeMemory); + + Maybe mappedSize; +#ifdef WASM_SUPPORTS_HUGE_MEMORY + // Override the mapped size if we are using huge memory. If we are not, then + // it will be calculated by the raw buffer we are using. + if (useHugeMemory) { + mappedSize = Some(wasm::HugeMappedSize); + } +#endif + + RawbufT* buffer = + RawbufT::AllocateWasm(memory.limits.indexType, initialPages, + clampedMaxPages, sourceMaxPages, mappedSize); + if (!buffer) { + if (useHugeMemory) { + WarnNumberASCII(cx, JSMSG_WASM_HUGE_MEMORY_FAILED); + if (cx->isExceptionPending()) { + cx->clearPendingException(); + } + + ReportOutOfMemory(cx); + return false; + } + + // If we fail, and have a sourceMaxPages, try to reserve the biggest + // chunk in the range [initialPages, clampedMaxPages) using log backoff. + if (!sourceMaxPages) { + wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed", + initialPages.value()); + ReportOutOfMemory(cx); + return false; + } + + uint64_t cur = clampedMaxPages.value() / 2; + for (; Pages(cur) > initialPages; cur /= 2) { + buffer = RawbufT::AllocateWasm(memory.limits.indexType, initialPages, + Pages(cur), sourceMaxPages, mappedSize); + if (buffer) { + break; + } + } + + if (!buffer) { + wasm::Log(cx, "new Memory({initial=%" PRIu64 " pages}) failed", + initialPages.value()); + ReportOutOfMemory(cx); + return false; + } + + // Try to grow our chunk as much as possible. + for (size_t d = cur / 2; d >= 1; d /= 2) { + buffer->tryGrowMaxPagesInPlace(Pages(d)); + } + } + + // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case + // of failure. 
+ RootedArrayBufferObjectMaybeShared object( + cx, ObjT::createFromNewRawBuffer(cx, buffer, initialPages.byteLength())); + if (!object) { + return false; + } + + maybeSharedObject.set(object); + + // See MaximumLiveMappedBuffers comment above. + if (wasmReservedBytes > WasmReservedBytesStartSyncFullGC) { + JS::PrepareForFullGC(cx); + JS::NonIncrementalGC(cx, JS::GCOptions::Normal, + JS::GCReason::TOO_MUCH_WASM_MEMORY); + wasmReservedBytesSinceLast = 0; + } else if (wasmReservedBytes > WasmReservedBytesStartTriggering) { + wasmReservedBytesSinceLast += uint64_t(buffer->mappedSize()); + if (wasmReservedBytesSinceLast > WasmReservedBytesPerTrigger) { + (void)cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY); + wasmReservedBytesSinceLast = 0; + } + } else { + wasmReservedBytesSinceLast = 0; + } + + // Log the result with details on the memory allocation + if (sourceMaxPages) { + if (useHugeMemory) { + wasm::Log(cx, + "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64 + " pages}) succeeded", + initialPages.value(), sourceMaxPages->value()); + } else { + wasm::Log(cx, + "new Memory({initial:%" PRIu64 " pages, maximum:%" PRIu64 + " pages}) succeeded " + "with internal maximum of %" PRIu64 " pages", + initialPages.value(), sourceMaxPages->value(), + object->wasmClampedMaxPages().value()); + } + } else { + wasm::Log(cx, "new Memory({initial:%" PRIu64 " pages}) succeeded", + initialPages.value()); + } + + return true; +} + +bool js::CreateWasmBuffer(JSContext* cx, const wasm::MemoryDesc& memory, + MutableHandleArrayBufferObjectMaybeShared buffer) { + MOZ_RELEASE_ASSERT(memory.initialPages() <= + wasm::MaxMemoryPages(memory.indexType())); + MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers); + + if (memory.isShared()) { + if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_SHMEM_LINK); + return false; + } + return CreateSpecificWasmBuffer(cx, memory, + buffer); 
+ } + return CreateSpecificWasmBuffer( + cx, memory, buffer); +} + +bool ArrayBufferObject::prepareForAsmJS() { + MOZ_ASSERT(byteLength() % wasm::PageSize == 0, + "prior size checking should have guaranteed page-size multiple"); + MOZ_ASSERT(byteLength() > 0, + "prior size checking should have excluded empty buffers"); + + switch (bufferKind()) { + case MALLOCED: + case MAPPED: + case EXTERNAL: + // It's okay if this uselessly sets the flag a second time. + setIsPreparedForAsmJS(); + return true; + + case INLINE_DATA: + static_assert(wasm::PageSize > MaxInlineBytes, + "inline data must be too small to be a page size multiple"); + MOZ_ASSERT_UNREACHABLE( + "inline-data buffers should be implicitly excluded by size checks"); + return false; + + case NO_DATA: + MOZ_ASSERT_UNREACHABLE( + "size checking should have excluded detached or empty buffers"); + return false; + + // asm.js code and associated buffers are potentially long-lived. Yet a + // buffer of user-owned data *must* be detached by the user before the + // user-owned data is disposed. No caller wants to use a user-owned + // ArrayBuffer with asm.js, so just don't support this and avoid a mess of + // complexity. + case USER_OWNED: + // wasm buffers can be detached at any time. 
+ case WASM: + MOZ_ASSERT(!isPreparedForAsmJS()); + return false; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("invalid bufferKind() encountered"); + return false; + } + + MOZ_ASSERT_UNREACHABLE("non-exhaustive kind-handling switch?"); + return false; +} + +ArrayBufferObject::BufferContents ArrayBufferObject::createMappedContents( + int fd, size_t offset, size_t length) { + void* data = + gc::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT); + return BufferContents::createMapped(data); +} + +uint8_t* ArrayBufferObject::inlineDataPointer() const { + return static_cast(fixedData(JSCLASS_RESERVED_SLOTS(&class_))); +} + +uint8_t* ArrayBufferObject::dataPointer() const { + return static_cast(getFixedSlot(DATA_SLOT).toPrivate()); +} + +SharedMem ArrayBufferObject::dataPointerShared() const { + return SharedMem::unshared(getFixedSlot(DATA_SLOT).toPrivate()); +} + +ArrayBufferObject::FreeInfo* ArrayBufferObject::freeInfo() const { + MOZ_ASSERT(isExternal()); + return reinterpret_cast(inlineDataPointer()); +} + +void ArrayBufferObject::releaseData(JS::GCContext* gcx) { + switch (bufferKind()) { + case INLINE_DATA: + // Inline data doesn't require releasing. + break; + case MALLOCED: + gcx->free_(this, dataPointer(), byteLength(), + MemoryUse::ArrayBufferContents); + break; + case NO_DATA: + // There's nothing to release if there's no data. + MOZ_ASSERT(dataPointer() == nullptr); + break; + case USER_OWNED: + // User-owned data is released by, well, the user. + break; + case MAPPED: + gc::DeallocateMappedContent(dataPointer(), byteLength()); + gcx->removeCellMemory(this, associatedBytes(), + MemoryUse::ArrayBufferContents); + break; + case WASM: + WasmArrayRawBuffer::Release(dataPointer()); + gcx->removeCellMemory(this, byteLength(), MemoryUse::ArrayBufferContents); + break; + case EXTERNAL: + if (freeInfo()->freeFunc) { + // The analyzer can't know for sure whether the embedder-supplied + // free function will GC. We give the analyzer a hint here. 
+ // (Doing a GC in the free function is considered a programmer + // error.) + JS::AutoSuppressGCAnalysis nogc; + freeInfo()->freeFunc(dataPointer(), freeInfo()->freeUserData); + } + break; + case BAD1: + MOZ_CRASH("invalid BufferKind encountered"); + break; + } +} + +void ArrayBufferObject::setDataPointer(BufferContents contents) { + setFixedSlot(DATA_SLOT, PrivateValue(contents.data())); + setFlags((flags() & ~KIND_MASK) | contents.kind()); + + if (isExternal()) { + auto info = freeInfo(); + info->freeFunc = contents.freeFunc(); + info->freeUserData = contents.freeUserData(); + } +} + +size_t ArrayBufferObject::byteLength() const { + return size_t(getFixedSlot(BYTE_LENGTH_SLOT).toPrivate()); +} + +inline size_t ArrayBufferObject::associatedBytes() const { + if (bufferKind() == MALLOCED) { + return byteLength(); + } + if (bufferKind() == MAPPED) { + return RoundUp(byteLength(), js::gc::SystemPageSize()); + } + MOZ_CRASH("Unexpected buffer kind"); +} + +void ArrayBufferObject::setByteLength(size_t length) { + MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength); + setFixedSlot(BYTE_LENGTH_SLOT, PrivateValue(length)); +} + +size_t ArrayBufferObject::wasmMappedSize() const { + if (isWasm()) { + return contents().wasmBuffer()->mappedSize(); + } + return byteLength(); +} + +IndexType ArrayBufferObject::wasmIndexType() const { + if (isWasm()) { + return contents().wasmBuffer()->indexType(); + } + MOZ_ASSERT(isPreparedForAsmJS()); + return wasm::IndexType::I32; +} + +Pages ArrayBufferObject::wasmPages() const { + if (isWasm()) { + return contents().wasmBuffer()->pages(); + } + MOZ_ASSERT(isPreparedForAsmJS()); + return Pages::fromByteLengthExact(byteLength()); +} + +Pages ArrayBufferObject::wasmClampedMaxPages() const { + if (isWasm()) { + return contents().wasmBuffer()->clampedMaxPages(); + } + MOZ_ASSERT(isPreparedForAsmJS()); + return Pages::fromByteLengthExact(byteLength()); +} + +Maybe ArrayBufferObject::wasmSourceMaxPages() const { + if (isWasm()) { + return 
contents().wasmBuffer()->sourceMaxPages(); + } + MOZ_ASSERT(isPreparedForAsmJS()); + return Some(Pages::fromByteLengthExact(byteLength())); +} + +size_t js::WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmMappedSize(); + } + return buf->as().wasmMappedSize(); +} + +IndexType js::WasmArrayBufferIndexType( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmIndexType(); + } + return buf->as().wasmIndexType(); +} +Pages js::WasmArrayBufferPages(const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmPages(); + } + return buf->as().volatileWasmPages(); +} +Pages js::WasmArrayBufferClampedMaxPages( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmClampedMaxPages(); + } + return buf->as().wasmClampedMaxPages(); +} +Maybe js::WasmArrayBufferSourceMaxPages( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmSourceMaxPages(); + } + return Some(buf->as().wasmSourceMaxPages()); +} + +static void CheckStealPreconditions(Handle buffer, + JSContext* cx) { + cx->check(buffer); + + MOZ_ASSERT(!buffer->isDetached(), "can't steal from a detached buffer"); + MOZ_ASSERT(!buffer->isPreparedForAsmJS(), + "asm.js-prepared buffers don't have detachable/stealable data"); +} + +/* static */ +bool ArrayBufferObject::wasmGrowToPagesInPlace( + wasm::IndexType t, Pages newPages, HandleArrayBufferObject oldBuf, + MutableHandleArrayBufferObject newBuf, JSContext* cx) { + CheckStealPreconditions(oldBuf, cx); + + MOZ_ASSERT(oldBuf->isWasm()); + + // Check that the new pages is within our allowable range. This will + // simultaneously check against the maximum specified in source and our + // implementation limits. 
+ if (newPages > oldBuf->wasmClampedMaxPages()) { + return false; + } + MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) && + newPages.byteLength() <= ArrayBufferObject::MaxByteLength); + + // We have checked against the clamped maximum and so we know we can convert + // to byte lengths now. + size_t newSize = newPages.byteLength(); + + // On failure, do not throw and ensure that the original buffer is + // unmodified and valid. After WasmArrayRawBuffer::growToPagesInPlace(), the + // wasm-visible length of the buffer has been increased so it must be the + // last fallible operation. + + newBuf.set(ArrayBufferObject::createEmpty(cx)); + if (!newBuf) { + cx->clearPendingException(); + return false; + } + + MOZ_ASSERT(newBuf->isNoData()); + + if (!oldBuf->contents().wasmBuffer()->growToPagesInPlace(newPages)) { + return false; + } + + // Extract the grown contents from |oldBuf|. + BufferContents oldContents = oldBuf->contents(); + + // Overwrite |oldBuf|'s data pointer *without* releasing old data. + oldBuf->setDataPointer(BufferContents::createNoData()); + + // Detach |oldBuf| now that doing so won't release |oldContents|. + RemoveCellMemory(oldBuf, oldBuf->byteLength(), + MemoryUse::ArrayBufferContents); + ArrayBufferObject::detach(cx, oldBuf); + + // Set |newBuf|'s contents to |oldBuf|'s original contents. + newBuf->initialize(newSize, oldContents); + AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents); + + return true; +} + +/* static */ +bool ArrayBufferObject::wasmMovingGrowToPages( + IndexType t, Pages newPages, HandleArrayBufferObject oldBuf, + MutableHandleArrayBufferObject newBuf, JSContext* cx) { + // On failure, do not throw and ensure that the original buffer is + // unmodified and valid. + + // Check that the new pages is within our allowable range. This will + // simultaneously check against the maximum specified in source and our + // implementation limits. 
+ if (newPages > oldBuf->wasmClampedMaxPages()) { + return false; + } + MOZ_ASSERT(newPages <= wasm::MaxMemoryPages(t) && + newPages.byteLength() < ArrayBufferObject::MaxByteLength); + + // We have checked against the clamped maximum and so we know we can convert + // to byte lengths now. + size_t newSize = newPages.byteLength(); + + if (wasm::ComputeMappedSize(newPages) <= oldBuf->wasmMappedSize() || + oldBuf->contents().wasmBuffer()->extendMappedSize(newPages)) { + return wasmGrowToPagesInPlace(t, newPages, oldBuf, newBuf, cx); + } + + newBuf.set(ArrayBufferObject::createEmpty(cx)); + if (!newBuf) { + cx->clearPendingException(); + return false; + } + + Pages clampedMaxPages = + wasm::ClampedMaxPages(t, newPages, Nothing(), /* hugeMemory */ false); + WasmArrayRawBuffer* newRawBuf = WasmArrayRawBuffer::AllocateWasm( + oldBuf->wasmIndexType(), newPages, clampedMaxPages, Nothing(), Nothing()); + if (!newRawBuf) { + return false; + } + + AddCellMemory(newBuf, newSize, MemoryUse::ArrayBufferContents); + + BufferContents contents = + BufferContents::createWasm(newRawBuf->dataPointer()); + newBuf->initialize(newSize, contents); + + memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), oldBuf->byteLength()); + ArrayBufferObject::detach(cx, oldBuf); + return true; +} + +/* static */ +void ArrayBufferObject::wasmDiscard(HandleArrayBufferObject buf, + uint64_t byteOffset, uint64_t byteLen) { + MOZ_ASSERT(buf->isWasm()); + buf->contents().wasmBuffer()->discard(byteOffset, byteLen); +} + +uint32_t ArrayBufferObject::flags() const { + return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32()); +} + +void ArrayBufferObject::setFlags(uint32_t flags) { + setFixedSlot(FLAGS_SLOT, Int32Value(flags)); +} + +static inline js::gc::AllocKind GetArrayBufferGCObjectKind(size_t numSlots) { + if (numSlots <= 4) { + return js::gc::AllocKind::ARRAYBUFFER4; + } + if (numSlots <= 8) { + return js::gc::AllocKind::ARRAYBUFFER8; + } + if (numSlots <= 12) { + return js::gc::AllocKind::ARRAYBUFFER12; + } + 
return js::gc::AllocKind::ARRAYBUFFER16; +} + +static ArrayBufferObject* NewArrayBufferObject(JSContext* cx, + HandleObject proto_, + gc::AllocKind allocKind) { + MOZ_ASSERT(allocKind == gc::AllocKind::ARRAYBUFFER4 || + allocKind == gc::AllocKind::ARRAYBUFFER8 || + allocKind == gc::AllocKind::ARRAYBUFFER12 || + allocKind == gc::AllocKind::ARRAYBUFFER16); + + RootedObject proto(cx, proto_); + if (!proto) { + proto = GlobalObject::getOrCreatePrototype(cx, JSProto_ArrayBuffer); + if (!proto) { + return nullptr; + } + } + + const JSClass* clasp = &ArrayBufferObject::class_; + + // Array buffers can store data inline so we only use fixed slots to cover the + // reserved slots, ignoring the AllocKind. + MOZ_ASSERT(ClassCanHaveFixedData(clasp)); + constexpr size_t nfixed = ArrayBufferObject::RESERVED_SLOTS; + static_assert(nfixed <= NativeObject::MAX_FIXED_SLOTS); + + Rooted shape( + cx, + SharedShape::getInitialShape(cx, clasp, cx->realm(), AsTaggedProto(proto), + nfixed, ObjectFlags())); + if (!shape) { + return nullptr; + } + + // Array buffers can't be nursery allocated but can be background-finalized. + MOZ_ASSERT(IsBackgroundFinalized(allocKind)); + MOZ_ASSERT(!CanNurseryAllocateFinalizedClass(clasp)); + constexpr gc::Heap heap = gc::Heap::Tenured; + + NativeObject* obj = NativeObject::create(cx, allocKind, heap, shape); + if (!obj) { + return nullptr; + } + + return &obj->as(); +} + +// Creates a new ArrayBufferObject with %ArrayBuffer.prototype% as proto and no +// space for inline data. 
+static ArrayBufferObject* NewArrayBufferObject(JSContext* cx) { + static_assert(ArrayBufferObject::RESERVED_SLOTS == 4); + return NewArrayBufferObject(cx, nullptr, gc::AllocKind::ARRAYBUFFER4); +} + +ArrayBufferObject* ArrayBufferObject::createForContents( + JSContext* cx, size_t nbytes, BufferContents contents) { + MOZ_ASSERT(contents); + MOZ_ASSERT(contents.kind() != INLINE_DATA); + MOZ_ASSERT(contents.kind() != NO_DATA); + MOZ_ASSERT(contents.kind() != WASM); + + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). + if (!CheckArrayBufferTooLarge(cx, nbytes)) { + return nullptr; + } + + // Some |contents| kinds need to store extra data in the ArrayBuffer beyond a + // data pointer. If needed for the particular kind, add extra fixed slots to + // the ArrayBuffer for use as raw storage to store such information. + constexpr size_t reservedSlots = ArrayBufferObject::RESERVED_SLOTS; + + size_t nAllocated = 0; + size_t nslots = reservedSlots; + if (contents.kind() == USER_OWNED) { + // No accounting to do in this case. + } else if (contents.kind() == EXTERNAL) { + // Store the FreeInfo in the inline data slots so that we + // don't use up slots for it in non-refcounted array buffers. + size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value)); + MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS, + "FreeInfo must fit in inline slots"); + nslots += freeInfoSlots; + } else { + // The ABO is taking ownership, so account the bytes against the zone. 
+ nAllocated = nbytes; + if (contents.kind() == MAPPED) { + nAllocated = RoundUp(nbytes, js::gc::SystemPageSize()); + } else { + MOZ_ASSERT(contents.kind() == MALLOCED, + "should have handled all possible callers' kinds"); + } + } + + gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots); + + AutoSetNewObjectMetadata metadata(cx); + Rooted buffer( + cx, NewArrayBufferObject(cx, nullptr, allocKind)); + if (!buffer) { + return nullptr; + } + + MOZ_ASSERT(!gc::IsInsideNursery(buffer), + "ArrayBufferObject has a finalizer that must be called to not " + "leak in some cases, so it can't be nursery-allocated"); + + buffer->initialize(nbytes, contents); + + if (contents.kind() == MAPPED || contents.kind() == MALLOCED) { + AddCellMemory(buffer, nAllocated, MemoryUse::ArrayBufferContents); + } + + return buffer; +} + +template +/* static */ std::tuple +ArrayBufferObject::createBufferAndData( + JSContext* cx, size_t nbytes, AutoSetNewObjectMetadata&, + JS::Handle proto /* = nullptr */) { + MOZ_ASSERT(nbytes <= ArrayBufferObject::MaxByteLength, + "caller must validate the byte count it passes"); + + // Try fitting the data inline with the object by repurposing fixed-slot + // storage. Add extra fixed slots if necessary to accomplish this, but don't + // exceed the maximum number of fixed slots! + size_t nslots = ArrayBufferObject::RESERVED_SLOTS; + ArrayBufferContents data; + if (nbytes <= MaxInlineBytes) { + int newSlots = HowMany(nbytes, sizeof(Value)); + MOZ_ASSERT(int(nbytes) <= newSlots * int(sizeof(Value))); + + nslots += newSlots; + } else { + data = FillType == FillContents::Uninitialized + ? 
AllocateUninitializedArrayBufferContents(cx, nbytes) + : AllocateArrayBufferContents(cx, nbytes); + if (!data) { + return {nullptr, nullptr}; + } + } + + gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots); + + ArrayBufferObject* buffer = NewArrayBufferObject(cx, proto, allocKind); + if (!buffer) { + return {nullptr, nullptr}; + } + + MOZ_ASSERT(!gc::IsInsideNursery(buffer), + "ArrayBufferObject has a finalizer that must be called to not " + "leak in some cases, so it can't be nursery-allocated"); + + uint8_t* toFill; + if (data) { + toFill = data.release(); + buffer->initialize(nbytes, BufferContents::createMalloced(toFill)); + AddCellMemory(buffer, nbytes, MemoryUse::ArrayBufferContents); + } else { + toFill = static_cast(buffer->initializeToInlineData(nbytes)); + if constexpr (FillType == FillContents::Zero) { + memset(toFill, 0, nbytes); + } + } + + return {buffer, toFill}; +} + +/* static */ ArrayBufferObject* ArrayBufferObject::copy( + JSContext* cx, JS::Handle unwrappedArrayBuffer) { + if (unwrappedArrayBuffer->isDetached()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TYPED_ARRAY_DETACHED); + return nullptr; + } + + size_t nbytes = unwrappedArrayBuffer->byteLength(); + + AutoSetNewObjectMetadata metadata(cx); + auto [buffer, toFill] = createBufferAndData( + cx, nbytes, metadata, nullptr); + if (!buffer) { + return nullptr; + } + + std::uninitialized_copy_n(unwrappedArrayBuffer->dataPointer(), nbytes, + toFill); + return buffer; +} + +ArrayBufferObject* ArrayBufferObject::createZeroed( + JSContext* cx, size_t nbytes, HandleObject proto /* = nullptr */) { + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). 
+ if (!CheckArrayBufferTooLarge(cx, nbytes)) { + return nullptr; + } + + AutoSetNewObjectMetadata metadata(cx); + auto [buffer, toFill] = + createBufferAndData(cx, nbytes, metadata, proto); + (void)toFill; + return buffer; +} + +ArrayBufferObject* ArrayBufferObject::createEmpty(JSContext* cx) { + AutoSetNewObjectMetadata metadata(cx); + ArrayBufferObject* obj = NewArrayBufferObject(cx); + if (!obj) { + return nullptr; + } + + obj->initialize(0, BufferContents::createNoData()); + return obj; +} + +ArrayBufferObject* ArrayBufferObject::createFromNewRawBuffer( + JSContext* cx, WasmArrayRawBuffer* rawBuffer, size_t initialSize) { + AutoSetNewObjectMetadata metadata(cx); + ArrayBufferObject* buffer = NewArrayBufferObject(cx); + if (!buffer) { + WasmArrayRawBuffer::Release(rawBuffer->dataPointer()); + return nullptr; + } + + MOZ_ASSERT(initialSize == rawBuffer->byteLength()); + + buffer->setByteLength(initialSize); + buffer->setFlags(0); + buffer->setFirstView(nullptr); + + auto contents = BufferContents::createWasm(rawBuffer->dataPointer()); + buffer->setDataPointer(contents); + + AddCellMemory(buffer, initialSize, MemoryUse::ArrayBufferContents); + + return buffer; +} + +/* static */ uint8_t* ArrayBufferObject::stealMallocedContents( + JSContext* cx, Handle buffer) { + CheckStealPreconditions(buffer, cx); + + switch (buffer->bufferKind()) { + case MALLOCED: { + uint8_t* stolenData = buffer->dataPointer(); + MOZ_ASSERT(stolenData); + + RemoveCellMemory(buffer, buffer->byteLength(), + MemoryUse::ArrayBufferContents); + + // Overwrite the old data pointer *without* releasing the contents + // being stolen. + buffer->setDataPointer(BufferContents::createNoData()); + + // Detach |buffer| now that doing so won't free |stolenData|. + ArrayBufferObject::detach(cx, buffer); + return stolenData; + } + + case INLINE_DATA: + case NO_DATA: + case USER_OWNED: + case MAPPED: + case EXTERNAL: { + // We can't use these data types directly. Make a copy to return. 
+ ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer); + if (!copiedData) { + return nullptr; + } + + // Detach |buffer|. This immediately releases the currently owned + // contents, freeing or unmapping data in the MAPPED and EXTERNAL cases. + ArrayBufferObject::detach(cx, buffer); + return copiedData.release(); + } + + case WASM: + MOZ_ASSERT_UNREACHABLE( + "wasm buffers aren't stealable except by a " + "memory.grow operation that shouldn't call this " + "function"); + return nullptr; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data"); + return nullptr; + } + + MOZ_ASSERT_UNREACHABLE("garbage kind computed"); + return nullptr; +} + +/* static */ ArrayBufferObject::BufferContents +ArrayBufferObject::extractStructuredCloneContents( + JSContext* cx, Handle buffer) { + CheckStealPreconditions(buffer, cx); + + BufferContents contents = buffer->contents(); + + switch (contents.kind()) { + case INLINE_DATA: + case NO_DATA: + case USER_OWNED: { + ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer); + if (!copiedData) { + return BufferContents::createFailed(); + } + + ArrayBufferObject::detach(cx, buffer); + return BufferContents::createMalloced(copiedData.release()); + } + + case MALLOCED: + case MAPPED: { + MOZ_ASSERT(contents); + + RemoveCellMemory(buffer, buffer->associatedBytes(), + MemoryUse::ArrayBufferContents); + + // Overwrite the old data pointer *without* releasing old data. + buffer->setDataPointer(BufferContents::createNoData()); + + // Detach |buffer| now that doing so won't release |oldContents|. 
+ ArrayBufferObject::detach(cx, buffer); + return contents; + } + + case WASM: + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return BufferContents::createFailed(); + + case EXTERNAL: + MOZ_ASSERT_UNREACHABLE( + "external ArrayBuffer shouldn't have passed the " + "structured-clone preflighting"); + break; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data"); + break; + } + + MOZ_ASSERT_UNREACHABLE("garbage kind computed"); + return BufferContents::createFailed(); +} + +/* static */ +void ArrayBufferObject::addSizeOfExcludingThis( + JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info, + JS::RuntimeSizes* runtimeSizes) { + auto& buffer = obj->as(); + switch (buffer.bufferKind()) { + case INLINE_DATA: + // Inline data's size should be reported by this object's size-class + // reporting. + break; + case MALLOCED: + if (buffer.isPreparedForAsmJS()) { + info->objectsMallocHeapElementsAsmJS += + mallocSizeOf(buffer.dataPointer()); + } else { + info->objectsMallocHeapElementsNormal += + mallocSizeOf(buffer.dataPointer()); + } + break; + case NO_DATA: + // No data is no memory. + MOZ_ASSERT(buffer.dataPointer() == nullptr); + break; + case USER_OWNED: + // User-owned data should be accounted for by the user. + break; + case EXTERNAL: + // External data will be accounted for by the owner of the buffer, + // not this view. 
+ break; + case MAPPED: + info->objectsNonHeapElementsNormal += buffer.byteLength(); + break; + case WASM: + if (!buffer.isDetached()) { + info->objectsNonHeapElementsWasm += buffer.byteLength(); + if (runtimeSizes) { + MOZ_ASSERT(buffer.wasmMappedSize() >= buffer.byteLength()); + runtimeSizes->wasmGuardPages += + buffer.wasmMappedSize() - buffer.byteLength(); + } + } + break; + case BAD1: + MOZ_CRASH("bad bufferKind()"); + } +} + +/* static */ +void ArrayBufferObject::finalize(JS::GCContext* gcx, JSObject* obj) { + obj->as().releaseData(gcx); +} + +/* static */ +void ArrayBufferObject::copyData(Handle toBuffer, + size_t toIndex, + Handle fromBuffer, + size_t fromIndex, size_t count) { + MOZ_ASSERT(toBuffer->byteLength() >= count); + MOZ_ASSERT(toBuffer->byteLength() >= toIndex + count); + MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex); + MOZ_ASSERT(fromBuffer->byteLength() >= fromIndex + count); + + memcpy(toBuffer->dataPointer() + toIndex, + fromBuffer->dataPointer() + fromIndex, count); +} + +/* static */ +size_t ArrayBufferObject::objectMoved(JSObject* obj, JSObject* old) { + ArrayBufferObject& dst = obj->as(); + const ArrayBufferObject& src = old->as(); + + // Fix up possible inline data pointer. + if (src.hasInlineData()) { + dst.setFixedSlot(DATA_SLOT, PrivateValue(dst.inlineDataPointer())); + } + + return 0; +} + +JSObject* ArrayBufferObject::firstView() { + return getFixedSlot(FIRST_VIEW_SLOT).isObject() + ? 
&getFixedSlot(FIRST_VIEW_SLOT).toObject() + : nullptr; +} + +void ArrayBufferObject::setFirstView(ArrayBufferViewObject* view) { + setFixedSlot(FIRST_VIEW_SLOT, ObjectOrNullValue(view)); +} + +bool ArrayBufferObject::addView(JSContext* cx, ArrayBufferViewObject* view) { + if (!firstView()) { + setFirstView(view); + return true; + } + + return ObjectRealm::get(this).innerViews.get().addView(cx, this, view); +} + +/* + * InnerViewTable + */ + +constexpr size_t VIEW_LIST_MAX_LENGTH = 500; + +bool InnerViewTable::addView(JSContext* cx, ArrayBufferObject* buffer, + JSObject* view) { + // ArrayBufferObject entries are only added when there are multiple views. + MOZ_ASSERT(buffer->firstView()); + + Map::AddPtr p = map.lookupForAdd(buffer); + + MOZ_ASSERT(!gc::IsInsideNursery(buffer)); + bool addToNursery = nurseryKeysValid && gc::IsInsideNursery(view); + + if (p) { + ViewVector& views = p->value(); + MOZ_ASSERT(!views.empty()); + + if (addToNursery) { + // Only add the entry to |nurseryKeys| if it isn't already there. + if (views.length() >= VIEW_LIST_MAX_LENGTH) { + // To avoid quadratic blowup, skip the loop below if we end up + // adding enormous numbers of views for the same object. + nurseryKeysValid = false; + } else { + for (size_t i = 0; i < views.length(); i++) { + if (gc::IsInsideNursery(views[i])) { + addToNursery = false; + break; + } + } + } + } + + if (!views.append(view)) { + ReportOutOfMemory(cx); + return false; + } + } else { + if (!map.add(p, buffer, ViewVector(cx->zone()))) { + ReportOutOfMemory(cx); + return false; + } + // ViewVector has one inline element, so the first insertion is + // guaranteed to succeed. 
+ MOZ_ALWAYS_TRUE(p->value().append(view)); + } + + if (addToNursery && !nurseryKeys.append(buffer)) { + nurseryKeysValid = false; + } + + return true; +} + +InnerViewTable::ViewVector* InnerViewTable::maybeViewsUnbarriered( + ArrayBufferObject* buffer) { + Map::Ptr p = map.lookup(buffer); + if (p) { + return &p->value(); + } + return nullptr; +} + +void InnerViewTable::removeViews(ArrayBufferObject* buffer) { + Map::Ptr p = map.lookup(buffer); + MOZ_ASSERT(p); + + map.remove(p); +} + +bool InnerViewTable::traceWeak(JSTracer* trc) { return map.traceWeak(trc); } + +void InnerViewTable::sweepAfterMinorGC(JSTracer* trc) { + MOZ_ASSERT(needsSweepAfterMinorGC()); + + if (nurseryKeysValid) { + for (size_t i = 0; i < nurseryKeys.length(); i++) { + JSObject* buffer = MaybeForwarded(nurseryKeys[i]); + Map::Ptr p = map.lookup(buffer); + if (p && + !Map::EntryGCPolicy::traceWeak(trc, &p->mutableKey(), &p->value())) { + map.remove(p); + } + } + } else { + // Do the required sweeping by looking at every map entry. + map.traceWeak(trc); + } + + nurseryKeys.clear(); + nurseryKeysValid = true; +} + +size_t InnerViewTable::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) { + size_t vectorSize = 0; + for (Map::Enum e(map); !e.empty(); e.popFront()) { + vectorSize += e.front().value().sizeOfExcludingThis(mallocSizeOf); + } + + return vectorSize + map.shallowSizeOfExcludingThis(mallocSizeOf) + + nurseryKeys.sizeOfExcludingThis(mallocSizeOf); +} + +template <> +bool JSObject::is() const { + return is() || is(); +} + +JS_PUBLIC_API size_t JS::GetArrayBufferByteLength(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapAs(); + return aobj ? 
aobj->byteLength() : 0; +} + +JS_PUBLIC_API uint8_t* JS::GetArrayBufferData(JSObject* obj, + bool* isSharedMemory, + const JS::AutoRequireNoGC&) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return nullptr; + } + *isSharedMemory = false; + return aobj->dataPointer(); +} + +static ArrayBufferObject* UnwrapOrReportArrayBuffer( + JSContext* cx, JS::Handle maybeArrayBuffer) { + JSObject* obj = CheckedUnwrapStatic(maybeArrayBuffer); + if (!obj) { + ReportAccessDenied(cx); + return nullptr; + } + + if (!obj->is()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_ARRAYBUFFER_REQUIRED); + return nullptr; + } + + return &obj->as(); +} + +JS_PUBLIC_API bool JS::DetachArrayBuffer(JSContext* cx, HandleObject obj) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedBuffer( + cx, UnwrapOrReportArrayBuffer(cx, obj)); + if (!unwrappedBuffer) { + return false; + } + + if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return false; + } + + AutoRealm ar(cx, unwrappedBuffer); + ArrayBufferObject::detach(cx, unwrappedBuffer); + return true; +} + +JS_PUBLIC_API bool JS::HasDefinedArrayBufferDetachKey(JSContext* cx, + HandleObject obj, + bool* isDefined) { + Rooted unwrappedBuffer( + cx, UnwrapOrReportArrayBuffer(cx, obj)); + if (!unwrappedBuffer) { + return false; + } + + if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) { + *isDefined = true; + } + + return true; +} + +JS_PUBLIC_API bool JS::IsDetachedArrayBufferObject(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return false; + } + + return aobj->isDetached(); +} + +JS_PUBLIC_API JSObject* JS::NewArrayBuffer(JSContext* cx, size_t nbytes) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + return ArrayBufferObject::createZeroed(cx, nbytes); +} + +JS_PUBLIC_API JSObject* 
JS::NewArrayBufferWithContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + MOZ_ASSERT_IF(!data, nbytes == 0); + + if (!data) { + // Don't pass nulled contents to |createForContents|. + return ArrayBufferObject::createZeroed(cx, 0); + } + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createMalloced(data); + return ArrayBufferObject::createForContents(cx, nbytes, contents); +} + +JS_PUBLIC_API JSObject* JS::CopyArrayBuffer(JSContext* cx, + Handle arrayBuffer) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(arrayBuffer != nullptr); + + Rooted unwrappedSource( + cx, UnwrapOrReportArrayBuffer(cx, arrayBuffer)); + if (!unwrappedSource) { + return nullptr; + } + + return ArrayBufferObject::copy(cx, unwrappedSource); +} + +JS_PUBLIC_API JSObject* JS::NewExternalArrayBuffer( + JSContext* cx, size_t nbytes, void* data, + JS::BufferContentsFreeFunc freeFunc, void* freeUserData) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = + BufferContents::createExternal(data, freeFunc, freeUserData); + return ArrayBufferObject::createForContents(cx, nbytes, contents); +} + +JS_PUBLIC_API JSObject* JS::NewArrayBufferWithUserOwnedContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createUserOwned(data); + return ArrayBufferObject::createForContents(cx, nbytes, contents); +} + +JS_PUBLIC_API bool JS::IsArrayBufferObject(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_PUBLIC_API bool JS::ArrayBufferHasData(JSObject* obj) { + return !obj->unwrapAs().isDetached(); +} + +JS_PUBLIC_API JSObject* JS::UnwrapArrayBuffer(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API 
JSObject* JS::UnwrapSharedArrayBuffer(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API void* JS::StealArrayBufferContents(JSContext* cx, + HandleObject obj) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedBuffer( + cx, UnwrapOrReportArrayBuffer(cx, obj)); + if (!unwrappedBuffer) { + return nullptr; + } + + if (unwrappedBuffer->isDetached()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TYPED_ARRAY_DETACHED); + return nullptr; + } + + if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return nullptr; + } + + AutoRealm ar(cx, unwrappedBuffer); + return ArrayBufferObject::stealMallocedContents(cx, unwrappedBuffer); +} + +JS_PUBLIC_API JSObject* JS::NewMappedArrayBufferWithContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createMapped(data); + return ArrayBufferObject::createForContents(cx, nbytes, contents); +} + +JS_PUBLIC_API void* JS::CreateMappedArrayBufferContents(int fd, size_t offset, + size_t length) { + return ArrayBufferObject::createMappedContents(fd, offset, length).data(); +} + +JS_PUBLIC_API void JS::ReleaseMappedArrayBufferContents(void* contents, + size_t length) { + gc::DeallocateMappedContent(contents, length); +} + +JS_PUBLIC_API bool JS::IsMappedArrayBufferObject(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return false; + } + + return aobj->isMapped(); +} + +JS_PUBLIC_API JSObject* JS::GetObjectAsArrayBuffer(JSObject* obj, + size_t* length, + uint8_t** data) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return nullptr; + } + + *length = aobj->byteLength(); + *data = aobj->dataPointer(); + + return aobj; +} + +JS_PUBLIC_API void 
JS::GetArrayBufferLengthAndData(JSObject* obj, + size_t* length, + bool* isSharedMemory, + uint8_t** data) { + auto& aobj = obj->as(); + *length = aobj.byteLength(); + *data = aobj.dataPointer(); + *isSharedMemory = false; +} + +const JSClass* const JS::ArrayBuffer::UnsharedClass = + &ArrayBufferObject::class_; +const JSClass* const JS::ArrayBuffer::SharedClass = + &SharedArrayBufferObject::class_; + +/* static */ JS::ArrayBuffer JS::ArrayBuffer::create(JSContext* cx, + size_t nbytes) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + return JS::ArrayBuffer(ArrayBufferObject::createZeroed(cx, nbytes)); +} + +uint8_t* JS::ArrayBuffer::getLengthAndData(size_t* length, bool* isSharedMemory, + const JS::AutoRequireNoGC& nogc) { + auto* buffer = obj->maybeUnwrapAs(); + if (!buffer) { + return nullptr; + } + *length = buffer->byteLength(); + if (buffer->is()) { + *isSharedMemory = true; + return buffer->dataPointerEither().unwrap(); + } + *isSharedMemory = false; + return buffer->as().dataPointer(); +}; + +JS::ArrayBuffer JS::ArrayBuffer::unwrap(JSObject* maybeWrapped) { + if (!maybeWrapped) { + return JS::ArrayBuffer(nullptr); + } + auto* ab = maybeWrapped->maybeUnwrapIf(); + return fromObject(ab); +} + +bool JS::ArrayBufferCopyData(JSContext* cx, Handle toBlock, + size_t toIndex, Handle fromBlock, + size_t fromIndex, size_t count) { + Rooted unwrappedToBlock( + cx, toBlock->maybeUnwrapIf()); + if (!unwrappedToBlock) { + ReportAccessDenied(cx); + return false; + } + + Rooted unwrappedFromBlock( + cx, fromBlock->maybeUnwrapIf()); + if (!unwrappedFromBlock) { + ReportAccessDenied(cx); + return false; + } + + // Verify that lengths still make sense and throw otherwise. 
+ if (toIndex + count < toIndex || // size_t overflow + fromIndex + count < fromIndex || // size_t overflow + toIndex + count > unwrappedToBlock->byteLength() || + fromIndex + count > unwrappedFromBlock->byteLength()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_ARRAYBUFFER_COPY_RANGE); + return false; + } + + // If both are array buffers, can use ArrayBufferCopyData + if (unwrappedToBlock->is() && + unwrappedFromBlock->is()) { + Rooted toArray( + cx, &unwrappedToBlock->as()); + Rooted fromArray( + cx, &unwrappedFromBlock->as()); + ArrayBufferObject::copyData(toArray, toIndex, fromArray, fromIndex, count); + return true; + } + + Rooted toArray( + cx, &unwrappedToBlock->as()); + Rooted fromArray( + cx, &unwrappedFromBlock->as()); + SharedArrayBufferObject::copyData(toArray, toIndex, fromArray, fromIndex, + count); + + return true; +} + +// https://tc39.es/ecma262/#sec-clonearraybuffer +// We only support the case where cloneConstructor is %ArrayBuffer%. Note, +// this means that cloning a SharedArrayBuffer will produce an ArrayBuffer +JSObject* JS::ArrayBufferClone(JSContext* cx, Handle srcBuffer, + size_t srcByteOffset, size_t srcLength) { + MOZ_ASSERT(srcBuffer->is()); + + // 2. (reordered) If IsDetachedBuffer(srcBuffer) is true, throw a TypeError + // exception. + if (IsDetachedArrayBufferObject(srcBuffer)) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TYPED_ARRAY_DETACHED); + return nullptr; + } + + // 1. Let targetBuffer be ? AllocateArrayBuffer(cloneConstructor, srcLength). + JS::RootedObject targetBuffer(cx, JS::NewArrayBuffer(cx, srcLength)); + if (!targetBuffer) { + return nullptr; + } + + // 3. Let srcBlock be srcBuffer.[[ArrayBufferData]]. + // 4. Let targetBlock be targetBuffer.[[ArrayBufferData]]. + // 5. Perform CopyDataBlockBytes(targetBlock, 0, srcBlock, srcByteOffset, + // srcLength). + if (!ArrayBufferCopyData(cx, targetBuffer, 0, srcBuffer, srcByteOffset, + srcLength)) { + return nullptr; + } + + // 6. 
Return targetBuffer. + return targetBuffer; +} diff --git a/js/src/vm/ArrayBufferObject.h b/js/src/vm/ArrayBufferObject.h new file mode 100644 index 0000000000..d2d94d6722 --- /dev/null +++ b/js/src/vm/ArrayBufferObject.h @@ -0,0 +1,660 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayBufferObject_h +#define vm_ArrayBufferObject_h + +#include "mozilla/Maybe.h" + +#include // std::tuple + +#include "builtin/TypedArrayConstants.h" +#include "gc/Memory.h" +#include "gc/ZoneAllocator.h" +#include "js/ArrayBuffer.h" +#include "js/GCHashTable.h" +#include "vm/JSFunction.h" +#include "vm/JSObject.h" +#include "vm/SharedMem.h" +#include "wasm/WasmMemory.h" + +namespace js { + +class ArrayBufferViewObject; +class AutoSetNewObjectMetadata; +class WasmArrayRawBuffer; + +namespace wasm { +struct MemoryDesc; +} // namespace wasm + +// Create a new mapping of size `mappedSize` with an initially committed prefix +// of size `initialCommittedSize`. Both arguments denote bytes and must be +// multiples of the page size, with `initialCommittedSize` <= `mappedSize`. +// Returns nullptr on failure. +void* MapBufferMemory(wasm::IndexType, size_t mappedSize, + size_t initialCommittedSize); + +// Commit additional memory in an existing mapping. `dataEnd` must be the +// correct value for the end of the existing committed area, and `delta` must be +// a byte amount to grow the mapping by, and must be a multiple of the page +// size. Returns false on failure. +bool CommitBufferMemory(void* dataEnd, size_t delta); + +// Extend an existing mapping by adding uncommited pages to it. 
`dataStart` +// must be the pointer to the start of the existing mapping, `mappedSize` the +// size of the existing mapping, and `newMappedSize` the size of the extended +// mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`. Both sizes +// must be divisible by the page size. Returns false on failure. +bool ExtendBufferMapping(void* dataStart, size_t mappedSize, + size_t newMappedSize); + +// Remove an existing mapping. `dataStart` must be the pointer to the start of +// the mapping, and `mappedSize` the size of that mapping. +void UnmapBufferMemory(wasm::IndexType t, void* dataStart, size_t mappedSize); + +// Return the number of bytes currently reserved for WebAssembly memory +uint64_t WasmReservedBytes(); + +// The inheritance hierarchy for the various classes relating to typed arrays +// is as follows. +// +// +// - JSObject +// - TypedObject (declared in wasm/TypedObject.h) +// - NativeObject +// - ArrayBufferObjectMaybeShared +// - ArrayBufferObject +// - SharedArrayBufferObject +// - ArrayBufferViewObject +// - DataViewObject +// - TypedArrayObject (declared in vm/TypedArrayObject.h) +// - TypedArrayObjectTemplate +// - Int8ArrayObject +// - Uint8ArrayObject +// - ... +// +// Note that |TypedArrayObjectTemplate| is just an implementation +// detail that makes implementing its various subclasses easier. +// +// ArrayBufferObject and SharedArrayBufferObject are unrelated data types: +// the racy memory of the latter cannot substitute for the non-racy memory of +// the former; the non-racy memory of the former cannot be used with the +// atomics; the former can be detached and the latter not. Hence they have been +// separated completely. +// +// Most APIs will only accept ArrayBufferObject. ArrayBufferObjectMaybeShared +// exists as a join point to allow APIs that can take or use either, notably +// AsmJS. +// +// In contrast with the separation of ArrayBufferObject and +// SharedArrayBufferObject, the TypedArray types can map either. 
+// +// The possible data ownership and reference relationships with ArrayBuffers +// and related classes are enumerated below. These are the possible locations +// for typed data: +// +// (1) malloc'ed or mmap'ed data owned by an ArrayBufferObject. +// (2) Data allocated inline with an ArrayBufferObject. +// (3) Data allocated inline with a TypedArrayObject. +// (4) Data allocated inline with an InlineTypedObject. +// +// An ArrayBufferObject may point to any of these sources of data, except (3). +// All array buffer views may point to any of these sources of data, except +// that (3) may only be pointed to by the typed array the data is inline with. +// +// During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3), +// and (4) may move. + +class ArrayBufferObjectMaybeShared; + +wasm::IndexType WasmArrayBufferIndexType( + const ArrayBufferObjectMaybeShared* buf); +wasm::Pages WasmArrayBufferPages(const ArrayBufferObjectMaybeShared* buf); +wasm::Pages WasmArrayBufferClampedMaxPages( + const ArrayBufferObjectMaybeShared* buf); +mozilla::Maybe WasmArrayBufferSourceMaxPages( + const ArrayBufferObjectMaybeShared* buf); +size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf); + +class ArrayBufferObjectMaybeShared : public NativeObject { + public: + inline size_t byteLength() const; + inline bool isDetached() const; + inline SharedMem dataPointerEither(); + + // WebAssembly support: + // Note: the eventual goal is to remove this from ArrayBuffer and have + // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object. 
+ + wasm::IndexType wasmIndexType() const { + return WasmArrayBufferIndexType(this); + } + wasm::Pages wasmPages() const { return WasmArrayBufferPages(this); } + wasm::Pages wasmClampedMaxPages() const { + return WasmArrayBufferClampedMaxPages(this); + } + mozilla::Maybe wasmSourceMaxPages() const { + return WasmArrayBufferSourceMaxPages(this); + } + size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); } + + inline bool isPreparedForAsmJS() const; + inline bool isWasm() const; +}; + +using RootedArrayBufferObjectMaybeShared = + Rooted; +using HandleArrayBufferObjectMaybeShared = + Handle; +using MutableHandleArrayBufferObjectMaybeShared = + MutableHandle; + +/* + * ArrayBufferObject + * + * This class holds the underlying raw buffer that the various ArrayBufferViews + * (eg DataViewObject, the TypedArrays, TypedObjects) access. It can be created + * explicitly and used to construct an ArrayBufferView, or can be created + * lazily when it is first accessed for a TypedArrayObject or TypedObject that + * doesn't have an explicit buffer. + * + * ArrayBufferObject (or really the underlying memory) /is not racy/: the + * memory is private to a single worker. + */ +class ArrayBufferObject : public ArrayBufferObjectMaybeShared { + static bool byteLengthGetterImpl(JSContext* cx, const CallArgs& args); + + public: + static const uint8_t DATA_SLOT = 0; + static const uint8_t BYTE_LENGTH_SLOT = 1; + static const uint8_t FIRST_VIEW_SLOT = 2; + static const uint8_t FLAGS_SLOT = 3; + + static const uint8_t RESERVED_SLOTS = 4; + + static const size_t ARRAY_BUFFER_ALIGNMENT = 8; + + static_assert(FLAGS_SLOT == JS_ARRAYBUFFER_FLAGS_SLOT, + "self-hosted code with burned-in constants must get the " + "right flags slot"); + + // The length of an ArrayBuffer or SharedArrayBuffer can be at most INT32_MAX + // on 32-bit platforms. Allow a larger limit on 64-bit platforms. 
+ static constexpr size_t MaxByteLengthForSmallBuffer = INT32_MAX; +#ifdef JS_64BIT + static constexpr size_t MaxByteLength = + size_t(8) * 1024 * 1024 * 1024; // 8 GB. +#else + static constexpr size_t MaxByteLength = MaxByteLengthForSmallBuffer; +#endif + + /** The largest number of bytes that can be stored inline. */ + static constexpr size_t MaxInlineBytes = + (NativeObject::MAX_FIXED_SLOTS - RESERVED_SLOTS) * sizeof(JS::Value); + + public: + enum BufferKind { + /** Inline data kept in the repurposed slots of this ArrayBufferObject. */ + INLINE_DATA = 0b000, + + /* Data allocated using the SpiderMonkey allocator. */ + MALLOCED = 0b001, + + /** + * No bytes are associated with this buffer. (This could be because the + * buffer is detached, because it's an internal, newborn buffer not yet + * overwritten with user-exposable semantics, or some other reason. The + * point is, don't read precise language semantics into this kind.) + */ + NO_DATA = 0b010, + + /** + * User-owned memory. The associated buffer must be manually detached + * before the user invalidates (deallocates, reuses the storage of, &c.) + * the user-owned memory. + */ + USER_OWNED = 0b011, + + WASM = 0b100, + MAPPED = 0b101, + EXTERNAL = 0b110, + + // These kind-values are currently invalid. We intend to expand valid + // BufferKinds in the future to either partly or fully use these values. + BAD1 = 0b111, + + KIND_MASK = 0b111 + }; + + public: + enum ArrayBufferFlags { + // The flags also store the BufferKind + BUFFER_KIND_MASK = BufferKind::KIND_MASK, + + DETACHED = 0b1000, + + // This MALLOCED, MAPPED, or EXTERNAL buffer has been prepared for asm.js + // and cannot henceforth be transferred/detached. (WASM, USER_OWNED, and + // INLINE_DATA buffers can't be prepared for asm.js -- although if an + // INLINE_DATA buffer is used with asm.js, it's silently rewritten into a + // MALLOCED buffer which *can* be prepared.) 
+ FOR_ASMJS = 0b10'0000, + }; + + static_assert(JS_ARRAYBUFFER_DETACHED_FLAG == DETACHED, + "self-hosted code with burned-in constants must use the " + "correct DETACHED bit value"); + + protected: + enum class FillContents { Zero, Uninitialized }; + + template + static std::tuple createBufferAndData( + JSContext* cx, size_t nbytes, AutoSetNewObjectMetadata&, + JS::Handle proto = nullptr); + + public: + class BufferContents { + uint8_t* data_; + BufferKind kind_; + JS::BufferContentsFreeFunc free_; + void* freeUserData_; + + friend class ArrayBufferObject; + + BufferContents(uint8_t* data, BufferKind kind, + JS::BufferContentsFreeFunc freeFunc = nullptr, + void* freeUserData = nullptr) + : data_(data), + kind_(kind), + free_(freeFunc), + freeUserData_(freeUserData) { + MOZ_ASSERT((kind_ & ~KIND_MASK) == 0); + MOZ_ASSERT_IF(free_ || freeUserData_, kind_ == EXTERNAL); + + // It is the caller's responsibility to ensure that the + // BufferContents does not outlive the data. + } + + public: + static BufferContents createInlineData(void* data) { + return BufferContents(static_cast(data), INLINE_DATA); + } + + static BufferContents createMalloced(void* data) { + return BufferContents(static_cast(data), MALLOCED); + } + + static BufferContents createNoData() { + return BufferContents(nullptr, NO_DATA); + } + + static BufferContents createUserOwned(void* data) { + return BufferContents(static_cast(data), USER_OWNED); + } + + static BufferContents createWasm(void* data) { + return BufferContents(static_cast(data), WASM); + } + + static BufferContents createMapped(void* data) { + return BufferContents(static_cast(data), MAPPED); + } + + static BufferContents createExternal(void* data, + JS::BufferContentsFreeFunc freeFunc, + void* freeUserData = nullptr) { + return BufferContents(static_cast(data), EXTERNAL, freeFunc, + freeUserData); + } + + static BufferContents createFailed() { + // There's no harm in tagging this as MALLOCED, even tho obviously it + // isn't. 
And adding an extra tag purely for this case is a complication + // that presently appears avoidable. + return BufferContents(nullptr, MALLOCED); + } + + uint8_t* data() const { return data_; } + BufferKind kind() const { return kind_; } + JS::BufferContentsFreeFunc freeFunc() const { return free_; } + void* freeUserData() const { return freeUserData_; } + + explicit operator bool() const { return data_ != nullptr; } + WasmArrayRawBuffer* wasmBuffer() const; + }; + + static const JSClass class_; + static const JSClass protoClass_; + + static bool byteLengthGetter(JSContext* cx, unsigned argc, Value* vp); + + static bool fun_isView(JSContext* cx, unsigned argc, Value* vp); + + static bool class_constructor(JSContext* cx, unsigned argc, Value* vp); + + static bool isOriginalByteLengthGetter(Native native) { + return native == byteLengthGetter; + } + + static ArrayBufferObject* createForContents(JSContext* cx, size_t nbytes, + BufferContents contents); + + static ArrayBufferObject* copy( + JSContext* cx, JS::Handle unwrappedArrayBuffer); + + static ArrayBufferObject* createZeroed(JSContext* cx, size_t nbytes, + HandleObject proto = nullptr); + + // Create an ArrayBufferObject that is safely finalizable and can later be + // initialize()d to become a real, content-visible ArrayBufferObject. + static ArrayBufferObject* createEmpty(JSContext* cx); + + // Create an ArrayBufferObject using the provided buffer and size. Assumes + // ownership of |buffer| even in case of failure, i.e. on failure |buffer| + // is deallocated. 
+ static ArrayBufferObject* createFromNewRawBuffer(JSContext* cx, + WasmArrayRawBuffer* buffer, + size_t initialSize); + + static void copyData(Handle toBuffer, size_t toIndex, + Handle fromBuffer, size_t fromIndex, + size_t count); + + static size_t objectMoved(JSObject* obj, JSObject* old); + + static uint8_t* stealMallocedContents(JSContext* cx, + Handle buffer); + + static BufferContents extractStructuredCloneContents( + JSContext* cx, Handle buffer); + + static void addSizeOfExcludingThis(JSObject* obj, + mozilla::MallocSizeOf mallocSizeOf, + JS::ClassInfo* info, + JS::RuntimeSizes* runtimeSizes); + + // ArrayBufferObjects (strongly) store the first view added to them, while + // later views are (weakly) stored in the compartment's InnerViewTable + // below. Buffers usually only have one view, so this slot optimizes for + // the common case. Avoiding entries in the InnerViewTable saves memory and + // non-incrementalized sweep time. + JSObject* firstView(); + + bool addView(JSContext* cx, ArrayBufferViewObject* view); + + // Detach this buffer from its original memory. (This necessarily makes + // views of this buffer unusable for modifying that original memory.) 
+ static void detach(JSContext* cx, Handle buffer); + + static constexpr size_t offsetOfByteLengthSlot() { + return getFixedSlotOffset(BYTE_LENGTH_SLOT); + } + static constexpr size_t offsetOfFlagsSlot() { + return getFixedSlotOffset(FLAGS_SLOT); + } + + private: + void setFirstView(ArrayBufferViewObject* view); + + uint8_t* inlineDataPointer() const; + + struct FreeInfo { + JS::BufferContentsFreeFunc freeFunc; + void* freeUserData; + }; + FreeInfo* freeInfo() const; + + public: + uint8_t* dataPointer() const; + SharedMem dataPointerShared() const; + size_t byteLength() const; + + BufferContents contents() const { + if (isExternal()) { + return BufferContents(dataPointer(), EXTERNAL, freeInfo()->freeFunc, + freeInfo()->freeUserData); + } + return BufferContents(dataPointer(), bufferKind()); + } + bool hasInlineData() const { return dataPointer() == inlineDataPointer(); } + + void releaseData(JS::GCContext* gcx); + + BufferKind bufferKind() const { + return BufferKind(flags() & BUFFER_KIND_MASK); + } + + bool isInlineData() const { return bufferKind() == INLINE_DATA; } + bool isMalloced() const { return bufferKind() == MALLOCED; } + bool isNoData() const { return bufferKind() == NO_DATA; } + bool hasUserOwnedData() const { return bufferKind() == USER_OWNED; } + + bool isWasm() const { return bufferKind() == WASM; } + bool isMapped() const { return bufferKind() == MAPPED; } + bool isExternal() const { return bufferKind() == EXTERNAL; } + + bool isDetached() const { return flags() & DETACHED; } + bool isPreparedForAsmJS() const { return flags() & FOR_ASMJS; } + + // WebAssembly support: + + /** + * Prepare this ArrayBuffer for use with asm.js. Returns true on success, + * false on failure. This function reports no errors. 
+ */ + [[nodiscard]] bool prepareForAsmJS(); + + size_t wasmMappedSize() const; + + wasm::IndexType wasmIndexType() const; + wasm::Pages wasmPages() const; + wasm::Pages wasmClampedMaxPages() const; + mozilla::Maybe wasmSourceMaxPages() const; + + [[nodiscard]] static bool wasmGrowToPagesInPlace( + wasm::IndexType t, wasm::Pages newPages, + Handle oldBuf, + MutableHandle newBuf, JSContext* cx); + [[nodiscard]] static bool wasmMovingGrowToPages( + wasm::IndexType t, wasm::Pages newPages, + Handle oldBuf, + MutableHandle newBuf, JSContext* cx); + static void wasmDiscard(Handle buf, uint64_t byteOffset, + uint64_t byteLength); + + static void finalize(JS::GCContext* gcx, JSObject* obj); + + static BufferContents createMappedContents(int fd, size_t offset, + size_t length); + + protected: + void setDataPointer(BufferContents contents); + void setByteLength(size_t length); + + size_t associatedBytes() const; + + uint32_t flags() const; + void setFlags(uint32_t flags); + + void setIsDetached() { setFlags(flags() | DETACHED); } + void setIsPreparedForAsmJS() { + MOZ_ASSERT(!isWasm()); + MOZ_ASSERT(!hasUserOwnedData()); + MOZ_ASSERT(!isInlineData()); + MOZ_ASSERT(isMalloced() || isMapped() || isExternal()); + setFlags(flags() | FOR_ASMJS); + } + + void initialize(size_t byteLength, BufferContents contents) { + setByteLength(byteLength); + setFlags(0); + setFirstView(nullptr); + setDataPointer(contents); + } + + void* initializeToInlineData(size_t byteLength) { + void* data = inlineDataPointer(); + initialize(byteLength, BufferContents::createInlineData(data)); + return data; + } +}; + +using RootedArrayBufferObject = Rooted; +using HandleArrayBufferObject = Handle; +using MutableHandleArrayBufferObject = MutableHandle; + +// Create a buffer for a wasm memory, whose type is determined by +// memory.indexType(). 
+bool CreateWasmBuffer(JSContext* cx, const wasm::MemoryDesc& memory, + MutableHandleArrayBufferObjectMaybeShared buffer); + +// Per-compartment table that manages the relationship between array buffers +// and the views that use their storage. +class InnerViewTable { + public: + using ViewVector = GCVector, 1, ZoneAllocPolicy>; + + friend class ArrayBufferObject; + + private: + // This key is a raw pointer and not a WeakHeapPtr because the post-barrier + // would hold nursery-allocated entries live unconditionally. It is a very + // common pattern in low-level and performance-oriented JavaScript to create + // hundreds or thousands of very short lived temporary views on a larger + // buffer; having to tenure all of these would be a catastrophic performance + // regression. Thus, it is vital that nursery pointers in this map not be held + // live. Special support is required in the minor GC, implemented in + // sweepAfterMinorGC. + using Map = GCHashMap, ViewVector, + StableCellHasher, ZoneAllocPolicy>; + + // For all objects sharing their storage with some other view, this maps + // the object to the list of such views. All entries in this map are weak. + Map map; + + // List of keys from innerViews where either the source or at least one + // target is in the nursery. The raw pointer to a JSObject is allowed here + // because this vector is cleared after every minor collection. Users in + // sweepAfterMinorCollection must be careful to use MaybeForwarded before + // touching these pointers. + Vector nurseryKeys; + + // Whether nurseryKeys is a complete list. + bool nurseryKeysValid; + + bool addView(JSContext* cx, ArrayBufferObject* buffer, JSObject* view); + ViewVector* maybeViewsUnbarriered(ArrayBufferObject* obj); + void removeViews(ArrayBufferObject* obj); + + public: + explicit InnerViewTable(Zone* zone) : map(zone), nurseryKeysValid(true) {} + + // Remove references to dead objects in the table and update table entries + // to reflect moved objects. 
+ bool traceWeak(JSTracer* trc); + void sweepAfterMinorGC(JSTracer* trc); + + bool empty() const { return map.empty(); } + + bool needsSweepAfterMinorGC() const { + return !nurseryKeys.empty() || !nurseryKeysValid; + } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf); +}; + +template +class MutableWrappedPtrOperations + : public WrappedPtrOperations { + InnerViewTable& table() { return static_cast(this)->get(); } + + public: + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) { + return table().sizeOfExcludingThis(mallocSizeOf); + } +}; + +class WasmArrayRawBuffer { + wasm::IndexType indexType_; + wasm::Pages clampedMaxPages_; + mozilla::Maybe sourceMaxPages_; + size_t mappedSize_; // Not including the header page + size_t length_; + + protected: + WasmArrayRawBuffer(wasm::IndexType indexType, uint8_t* buffer, + wasm::Pages clampedMaxPages, + const mozilla::Maybe& sourceMaxPages, + size_t mappedSize, size_t length) + : indexType_(indexType), + clampedMaxPages_(clampedMaxPages), + sourceMaxPages_(sourceMaxPages), + mappedSize_(mappedSize), + length_(length) { + MOZ_ASSERT(buffer == dataPointer()); + } + + public: + static WasmArrayRawBuffer* AllocateWasm( + wasm::IndexType indexType, wasm::Pages initialPages, + wasm::Pages clampedMaxPages, + const mozilla::Maybe& sourceMaxPages, + const mozilla::Maybe& mappedSize); + static void Release(void* mem); + + uint8_t* dataPointer() { + uint8_t* ptr = reinterpret_cast(this); + return ptr + sizeof(WasmArrayRawBuffer); + } + + static const WasmArrayRawBuffer* fromDataPtr(const uint8_t* dataPtr) { + return reinterpret_cast( + dataPtr - sizeof(WasmArrayRawBuffer)); + } + + static WasmArrayRawBuffer* fromDataPtr(uint8_t* dataPtr) { + return reinterpret_cast(dataPtr - + sizeof(WasmArrayRawBuffer)); + } + + wasm::IndexType indexType() const { return indexType_; } + + uint8_t* basePointer() { return dataPointer() - gc::SystemPageSize(); } + + size_t mappedSize() const { return mappedSize_; } + + 
size_t byteLength() const { return length_; } + + wasm::Pages pages() const { + return wasm::Pages::fromByteLengthExact(length_); + } + + wasm::Pages clampedMaxPages() const { return clampedMaxPages_; } + + mozilla::Maybe sourceMaxPages() const { return sourceMaxPages_; } + + [[nodiscard]] bool growToPagesInPlace(wasm::Pages newPages); + + [[nodiscard]] bool extendMappedSize(wasm::Pages maxPages); + + // Try and grow the mapped region of memory. Does not change current size. + // Does not move memory if no space to grow. + void tryGrowMaxPagesInPlace(wasm::Pages deltaMaxPages); + + // Discard a region of memory, zeroing the pages and releasing physical memory + // back to the operating system. byteOffset and byteLen must be wasm page + // aligned and in bounds. A discard of zero bytes will have no effect. + void discard(size_t byteOffset, size_t byteLen); +}; + +} // namespace js + +template <> +bool JSObject::is() const; + +#endif // vm_ArrayBufferObject_h diff --git a/js/src/vm/ArrayBufferObjectMaybeShared.cpp b/js/src/vm/ArrayBufferObjectMaybeShared.cpp new file mode 100644 index 0000000000..400a8baa3f --- /dev/null +++ b/js/src/vm/ArrayBufferObjectMaybeShared.cpp @@ -0,0 +1,76 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "mozilla/Assertions.h" // MOZ_ASSERT + +#include // uint8_t, uint32_t + +#include "jstypes.h" // JS_PUBLIC_API + +#include "js/ArrayBufferMaybeShared.h" +#include "vm/ArrayBufferObject.h" // js::ArrayBufferObject +#include "vm/JSObject.h" // JSObject +#include "vm/SharedArrayObject.h" // js::SharedArrayBufferObject +#include "vm/SharedMem.h" // SharedMem + +using namespace js; + +JS_PUBLIC_API bool JS::IsArrayBufferObjectMaybeShared(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_PUBLIC_API JSObject* JS::UnwrapArrayBufferMaybeShared(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API void JS::GetArrayBufferMaybeSharedLengthAndData( + JSObject* obj, size_t* length, bool* isSharedMemory, uint8_t** data) { + MOZ_ASSERT(obj->is()); + + if (obj->is()) { + auto* buffer = &obj->as(); + *length = buffer->byteLength(); + *data = buffer->dataPointerShared().unwrap(); + *isSharedMemory = true; + } else { + auto* buffer = &obj->as(); + *length = buffer->byteLength(); + *data = buffer->dataPointer(); + *isSharedMemory = false; + } +} + +JS_PUBLIC_API uint8_t* JS::GetArrayBufferMaybeSharedData( + JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) { + MOZ_ASSERT(obj->maybeUnwrapIf()); + + if (ArrayBufferObject* aobj = obj->maybeUnwrapIf()) { + *isSharedMemory = false; + return aobj->dataPointer(); + } else if (SharedArrayBufferObject* saobj = + obj->maybeUnwrapIf()) { + *isSharedMemory = true; + return saobj->dataPointerShared().unwrap(); + } + + return nullptr; +} + +JS_PUBLIC_API bool JS::IsLargeArrayBufferMaybeShared(JSObject* obj) { +#ifdef JS_64BIT + obj = UnwrapArrayBufferMaybeShared(obj); + MOZ_ASSERT(obj); + size_t len = obj->is() + ? obj->as().byteLength() + : obj->as().byteLength(); + return len > ArrayBufferObject::MaxByteLengthForSmallBuffer; +#else + // Large ArrayBuffers are not supported on 32-bit. 
+ static_assert(ArrayBufferObject::MaxByteLength == + ArrayBufferObject::MaxByteLengthForSmallBuffer); + return false; +#endif +} diff --git a/js/src/vm/ArrayBufferViewObject.cpp b/js/src/vm/ArrayBufferViewObject.cpp new file mode 100644 index 0000000000..11e6d9fa72 --- /dev/null +++ b/js/src/vm/ArrayBufferViewObject.cpp @@ -0,0 +1,319 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/ArrayBufferViewObject.h" + +#include "builtin/DataViewObject.h" +#include "gc/Nursery.h" +#include "js/experimental/TypedData.h" // JS_GetArrayBufferView{Data,Buffer,Length,ByteOffset}, JS_GetObjectAsArrayBufferView, JS_IsArrayBufferViewObject +#include "js/SharedArrayBuffer.h" +#include "vm/Compartment.h" +#include "vm/JSContext.h" +#include "vm/TypedArrayObject.h" + +#include "gc/Nursery-inl.h" +#include "vm/ArrayBufferObject-inl.h" +#include "vm/NativeObject-inl.h" + +using namespace js; + +// This method is used to trace TypedArrayObjects and DataViewObjects. It +// updates the object's data pointer if it points to inline data in an object +// that was moved. +/* static */ +void ArrayBufferViewObject::trace(JSTracer* trc, JSObject* obj) { + ArrayBufferViewObject* view = &obj->as(); + + // Update view's data pointer if it moved. + if (view->hasBuffer()) { + JSObject* bufferObj = &view->bufferValue().toObject(); + if (gc::MaybeForwardedObjectIs(bufferObj)) { + auto* buffer = &gc::MaybeForwardedObjectAs(bufferObj); + + size_t offset = view->byteOffset(); + MOZ_ASSERT_IF(!buffer->dataPointer(), offset == 0); + + // The data may or may not be inline with the buffer. 
The buffer can only + // move during a compacting GC, in which case its objectMoved hook has + // already updated the buffer's data pointer. + void* oldData = view->dataPointerEither_(); + void* data = buffer->dataPointer() + offset; + if (data != oldData) { + view->getFixedSlotRef(DATA_SLOT).unbarrieredSet(PrivateValue(data)); + } + } + } +} + +template <> +bool JSObject::is() const { + return is() || is(); +} + +void ArrayBufferViewObject::notifyBufferDetached() { + MOZ_ASSERT(!isSharedMemory()); + MOZ_ASSERT(hasBuffer()); + + setFixedSlot(LENGTH_SLOT, PrivateValue(size_t(0))); + setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(size_t(0))); + setFixedSlot(DATA_SLOT, UndefinedValue()); +} + +/* static */ +ArrayBufferObjectMaybeShared* ArrayBufferViewObject::bufferObject( + JSContext* cx, Handle thisObject) { + if (thisObject->is()) { + Rooted typedArray(cx, + &thisObject->as()); + if (!TypedArrayObject::ensureHasBuffer(cx, typedArray)) { + return nullptr; + } + } + return thisObject->bufferEither(); +} + +bool ArrayBufferViewObject::init(JSContext* cx, + ArrayBufferObjectMaybeShared* buffer, + size_t byteOffset, size_t length, + uint32_t bytesPerElement) { + MOZ_ASSERT_IF(!buffer, byteOffset == 0); + MOZ_ASSERT_IF(buffer, !buffer->isDetached()); + + MOZ_ASSERT(byteOffset <= ArrayBufferObject::MaxByteLength); + MOZ_ASSERT(length <= ArrayBufferObject::MaxByteLength); + MOZ_ASSERT(byteOffset + length <= ArrayBufferObject::MaxByteLength); + + MOZ_ASSERT_IF(is(), + length <= TypedArrayObject::MaxByteLength / bytesPerElement); + + // The isSharedMemory property is invariant. Self-hosting code that + // sets BUFFER_SLOT or the private slot (if it does) must maintain it by + // always setting those to reference shared memory. 
+ if (buffer && buffer->is()) { + setIsSharedMemory(); + } + + initFixedSlot(BYTEOFFSET_SLOT, PrivateValue(byteOffset)); + initFixedSlot(LENGTH_SLOT, PrivateValue(length)); + initFixedSlot(BUFFER_SLOT, ObjectOrNullValue(buffer)); + + if (buffer) { + SharedMem ptr = buffer->dataPointerEither(); + initDataPointer(ptr + byteOffset); + + // Only ArrayBuffers used for inline typed objects can have + // nursery-allocated data and we shouldn't see such buffers here. + MOZ_ASSERT_IF(buffer->byteLength() > 0, !cx->nursery().isInside(ptr)); + } else { + MOZ_ASSERT(is()); + MOZ_ASSERT(length * bytesPerElement <= + TypedArrayObject::INLINE_BUFFER_LIMIT); + void* data = fixedData(TypedArrayObject::FIXED_DATA_START); + initReservedSlot(DATA_SLOT, PrivateValue(data)); + memset(data, 0, length * bytesPerElement); +#ifdef DEBUG + if (length == 0) { + uint8_t* elements = static_cast(data); + elements[0] = ZeroLengthArrayData; + } +#endif + } + +#ifdef DEBUG + if (buffer) { + size_t viewByteLength = length * bytesPerElement; + size_t viewByteOffset = byteOffset; + size_t bufferByteLength = buffer->byteLength(); + // Unwraps are safe: both are for the pointer value. + MOZ_ASSERT_IF(buffer->is(), + buffer->dataPointerEither().unwrap(/*safe*/) <= + dataPointerEither().unwrap(/*safe*/)); + MOZ_ASSERT(bufferByteLength - viewByteOffset >= viewByteLength); + MOZ_ASSERT(viewByteOffset <= bufferByteLength); + } +#endif + + // ArrayBufferObjects track their views to support detaching. 
+ if (buffer && buffer->is()) { + if (!buffer->as().addView(cx, this)) { + return false; + } + } + + return true; +} + +/* JS Public API */ + +JS_PUBLIC_API bool JS_IsArrayBufferViewObject(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_PUBLIC_API JSObject* js::UnwrapArrayBufferView(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API void* JS_GetArrayBufferViewData(JSObject* obj, + bool* isSharedMemory, + const JS::AutoRequireNoGC&) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return nullptr; + } + + *isSharedMemory = view->isSharedMemory(); + return view->dataPointerEither().unwrap( + /*safe - caller sees isSharedMemory flag*/); +} + +JS_PUBLIC_API uint8_t* JS_GetArrayBufferViewFixedData(JSObject* obj, + uint8_t* buffer, + size_t bufSize) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return nullptr; + } + + // Disallow shared memory until it is needed. + if (view->isSharedMemory()) { + return nullptr; + } + + // TypedArrays (but not DataViews) can have inline data, in which case we + // need to copy into the given buffer. + if (view->is()) { + TypedArrayObject* ta = &view->as(); + if (ta->hasInlineElements()) { + size_t bytes = ta->byteLength(); + if (bytes > bufSize) { + return nullptr; // Does not fit. 
+ } + memcpy(buffer, view->dataPointerUnshared(), bytes); + return buffer; + } + } + + return static_cast(view->dataPointerUnshared()); +} + +JS_PUBLIC_API JSObject* JS_GetArrayBufferViewBuffer(JSContext* cx, + HandleObject obj, + bool* isSharedMemory) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedView( + cx, obj->maybeUnwrapAs()); + if (!unwrappedView) { + ReportAccessDenied(cx); + return nullptr; + } + + ArrayBufferObjectMaybeShared* unwrappedBuffer; + { + AutoRealm ar(cx, unwrappedView); + unwrappedBuffer = ArrayBufferViewObject::bufferObject(cx, unwrappedView); + if (!unwrappedBuffer) { + return nullptr; + } + } + *isSharedMemory = unwrappedBuffer->is(); + + RootedObject buffer(cx, unwrappedBuffer); + if (!cx->compartment()->wrap(cx, &buffer)) { + return nullptr; + } + + return buffer; +} + +JS_PUBLIC_API size_t JS_GetArrayBufferViewByteLength(JSObject* obj) { + obj = obj->maybeUnwrapAs(); + if (!obj) { + return 0; + } + size_t length = obj->is() + ? obj->as().byteLength() + : obj->as().byteLength(); + return length; +} + +bool JS::ArrayBufferView::isDetached() const { + MOZ_ASSERT(obj); + return obj->as().hasDetachedBuffer(); +} + +JS_PUBLIC_API size_t JS_GetArrayBufferViewByteOffset(JSObject* obj) { + obj = obj->maybeUnwrapAs(); + if (!obj) { + return 0; + } + size_t offset = obj->is() + ? obj->as().byteOffset() + : obj->as().byteOffset(); + return offset; +} + +JS_PUBLIC_API uint8_t* JS::ArrayBufferView::getLengthAndData( + size_t* length, bool* isSharedMemory, const AutoRequireNoGC&) { + MOZ_ASSERT(obj->is()); + size_t byteLength = obj->is() + ? obj->as().byteLength() + : obj->as().byteLength(); + *length = byteLength; // *Not* the number of elements in the array, if + // sizeof(elt) != 1. 
+ + ArrayBufferViewObject& view = obj->as(); + *isSharedMemory = view.isSharedMemory(); + return static_cast( + view.dataPointerEither().unwrap(/*safe - caller sees isShared flag*/)); +} + +JS_PUBLIC_API JSObject* JS_GetObjectAsArrayBufferView(JSObject* obj, + size_t* length, + bool* isSharedMemory, + uint8_t** data) { + obj = obj->maybeUnwrapIf(); + if (!obj) { + return nullptr; + } + + js::GetArrayBufferViewLengthAndData(obj, length, isSharedMemory, data); + return obj; +} + +JS_PUBLIC_API void js::GetArrayBufferViewLengthAndData(JSObject* obj, + size_t* length, + bool* isSharedMemory, + uint8_t** data) { + JS::AutoAssertNoGC nogc; + *data = JS::ArrayBufferView::fromObject(obj).getLengthAndData( + length, isSharedMemory, nogc); +} + +JS_PUBLIC_API bool JS::IsArrayBufferViewShared(JSObject* obj) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return false; + } + return view->isSharedMemory(); +} + +JS_PUBLIC_API bool JS::IsLargeArrayBufferView(JSObject* obj) { +#ifdef JS_64BIT + obj = &obj->unwrapAs(); + size_t len = obj->is() + ? obj->as().byteLength() + : obj->as().byteLength(); + return len > ArrayBufferObject::MaxByteLengthForSmallBuffer; +#else + // Large ArrayBuffers are not supported on 32-bit. + static_assert(ArrayBufferObject::MaxByteLength == + ArrayBufferObject::MaxByteLengthForSmallBuffer); + return false; +#endif +} diff --git a/js/src/vm/ArrayBufferViewObject.h b/js/src/vm/ArrayBufferViewObject.h new file mode 100644 index 0000000000..7d6a4b70bd --- /dev/null +++ b/js/src/vm/ArrayBufferViewObject.h @@ -0,0 +1,166 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_ArrayBufferViewObject_h +#define vm_ArrayBufferViewObject_h + +#include "builtin/TypedArrayConstants.h" +#include "vm/ArrayBufferObject.h" +#include "vm/NativeObject.h" +#include "vm/SharedArrayObject.h" +#include "vm/SharedMem.h" + +namespace js { + +/* + * ArrayBufferViewObject + * + * Common base class for all array buffer views (DataViewObject and + * TypedArrayObject). + */ + +class ArrayBufferViewObject : public NativeObject { + public: + // Underlying (Shared)ArrayBufferObject. + static constexpr size_t BUFFER_SLOT = 0; + static_assert(BUFFER_SLOT == JS_TYPEDARRAYLAYOUT_BUFFER_SLOT, + "self-hosted code with burned-in constants must get the " + "right buffer slot"); + + // Slot containing length of the view in number of typed elements. + static constexpr size_t LENGTH_SLOT = 1; + + // Offset of view within underlying (Shared)ArrayBufferObject. + static constexpr size_t BYTEOFFSET_SLOT = 2; + + // Pointer to raw buffer memory. + static constexpr size_t DATA_SLOT = 3; + + static constexpr size_t RESERVED_SLOTS = 4; + +#ifdef DEBUG + static const uint8_t ZeroLengthArrayData = 0x4A; +#endif + + static constexpr int bufferOffset() { + return NativeObject::getFixedSlotOffset(BUFFER_SLOT); + } + static constexpr int lengthOffset() { + return NativeObject::getFixedSlotOffset(LENGTH_SLOT); + } + static constexpr int byteOffsetOffset() { + return NativeObject::getFixedSlotOffset(BYTEOFFSET_SLOT); + } + static constexpr int dataOffset() { + return NativeObject::getFixedSlotOffset(DATA_SLOT); + } + + private: + void* dataPointerEither_() const { + // Note, do not check whether shared or not + // Keep synced with js::GetArrayLengthAndData in jsfriendapi.h! 
+ return maybePtrFromReservedSlot(DATA_SLOT); + } + + public: + [[nodiscard]] bool init(JSContext* cx, ArrayBufferObjectMaybeShared* buffer, + size_t byteOffset, size_t length, + uint32_t bytesPerElement); + + static ArrayBufferObjectMaybeShared* bufferObject( + JSContext* cx, Handle obj); + + void notifyBufferDetached(); + + void initDataPointer(SharedMem viewData) { + // Install a pointer to the buffer location that corresponds + // to offset zero within the typed array. + // + // The following unwrap is safe because the DATA_SLOT is + // accessed only from jitted code and from the + // dataPointerEither_() accessor above; in neither case does the + // raw pointer escape untagged into C++ code. + void* data = viewData.unwrap(/*safe - see above*/); + initReservedSlot(DATA_SLOT, PrivateValue(data)); + } + + SharedMem dataPointerShared() const { + return SharedMem::shared(dataPointerEither_()); + } + SharedMem dataPointerEither() const { + if (isSharedMemory()) { + return SharedMem::shared(dataPointerEither_()); + } + return SharedMem::unshared(dataPointerEither_()); + } + void* dataPointerUnshared() const { + MOZ_ASSERT(!isSharedMemory()); + return dataPointerEither_(); + } + + Value bufferValue() const { return getFixedSlot(BUFFER_SLOT); } + bool hasBuffer() const { return bufferValue().isObject(); } + + ArrayBufferObject* bufferUnshared() const { + MOZ_ASSERT(!isSharedMemory()); + ArrayBufferObjectMaybeShared* obj = bufferEither(); + if (!obj) { + return nullptr; + } + return &obj->as(); + } + SharedArrayBufferObject* bufferShared() const { + MOZ_ASSERT(isSharedMemory()); + ArrayBufferObjectMaybeShared* obj = bufferEither(); + if (!obj) { + return nullptr; + } + return &obj->as(); + } + ArrayBufferObjectMaybeShared* bufferEither() const { + JSObject* obj = bufferValue().toObjectOrNull(); + if (!obj) { + return nullptr; + } + MOZ_ASSERT(isSharedMemory() ? 
obj->is() + : obj->is()); + return &obj->as(); + } + + bool hasDetachedBuffer() const { + // Shared buffers can't be detached. + if (isSharedMemory()) { + return false; + } + + // A typed array with a null buffer has never had its buffer exposed to + // become detached. + ArrayBufferObject* buffer = bufferUnshared(); + if (!buffer) { + return false; + } + + return buffer->isDetached(); + } + + size_t byteOffset() const { + return size_t(getFixedSlot(BYTEOFFSET_SLOT).toPrivate()); + } + + Value byteOffsetValue() const { + size_t offset = byteOffset(); + return NumberValue(offset); + } + + static void trace(JSTracer* trc, JSObject* obj); +}; + +} // namespace js + +template <> +bool JSObject::is() const; + +#endif // vm_ArrayBufferViewObject_h diff --git a/js/src/vm/ArrayObject-inl.h b/js/src/vm/ArrayObject-inl.h new file mode 100644 index 0000000000..240a5f3aef --- /dev/null +++ b/js/src/vm/ArrayObject-inl.h @@ -0,0 +1,87 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_ArrayObject_inl_h +#define vm_ArrayObject_inl_h + +#include "vm/ArrayObject.h" + +#include "gc/Allocator.h" +#include "gc/GCProbes.h" + +#include "vm/JSObject-inl.h" +#include "vm/NativeObject-inl.h" + +namespace js { + +/* static */ MOZ_ALWAYS_INLINE ArrayObject* ArrayObject::create( + JSContext* cx, gc::AllocKind kind, gc::Heap heap, + Handle shape, uint32_t length, uint32_t slotSpan, + AutoSetNewObjectMetadata& metadata, gc::AllocSite* site) { + debugCheckNewObject(shape, kind, heap); + + const JSClass* clasp = &ArrayObject::class_; + MOZ_ASSERT(shape); + MOZ_ASSERT(shape->getObjectClass() == clasp); + MOZ_ASSERT(clasp->isNativeObject()); + MOZ_ASSERT(!clasp->hasFinalize()); + + // Note: the slot span is passed as argument to allow more constant folding + // below for the common case of slotSpan == 0. + MOZ_ASSERT(shape->slotSpan() == slotSpan); + + // Arrays can use their fixed slots to store elements, so can't have shapes + // which allow named properties to be stored in the fixed slots. + MOZ_ASSERT(shape->numFixedSlots() == 0); + + size_t nDynamicSlots = calculateDynamicSlots(0, slotSpan, clasp); + ArrayObject* aobj = cx->newCell(kind, heap, clasp, site); + if (!aobj) { + return nullptr; + } + + aobj->initShape(shape); + aobj->initFixedElements(kind, length); + + if (!nDynamicSlots) { + aobj->initEmptyDynamicSlots(); + } else if (!aobj->allocateInitialSlots(cx, nDynamicSlots)) { + return nullptr; + } + + MOZ_ASSERT(clasp->shouldDelayMetadataBuilder()); + cx->realm()->setObjectPendingMetadata(aobj); + + if (slotSpan > 0) { + aobj->initDynamicSlots(slotSpan); + } + + gc::gcprobes::CreateObject(aobj); + return aobj; +} + +inline DenseElementResult ArrayObject::addDenseElementNoLengthChange( + JSContext* cx, uint32_t index, const Value& val) { + MOZ_ASSERT(isExtensible()); + + // Only support the `index < length` case so that we don't have to increase + // the array's .length value below. 
+ if (index >= length() || containsDenseElement(index) || isIndexed()) { + return DenseElementResult::Incomplete; + } + + DenseElementResult res = ensureDenseElements(cx, index, 1); + if (MOZ_UNLIKELY(res != DenseElementResult::Success)) { + return res; + } + + initDenseElement(index, val); + return DenseElementResult::Success; +} + +} // namespace js + +#endif // vm_ArrayObject_inl_h diff --git a/js/src/vm/ArrayObject.h b/js/src/vm/ArrayObject.h new file mode 100644 index 0000000000..8a10710dd8 --- /dev/null +++ b/js/src/vm/ArrayObject.h @@ -0,0 +1,62 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayObject_h +#define vm_ArrayObject_h + +#include "vm/NativeObject.h" + +namespace js { + +class AutoSetNewObjectMetadata; + +class ArrayObject : public NativeObject { + public: + // Array(x) eagerly allocates dense elements if x <= this value. Without + // the subtraction the max would roll over to the next power-of-two (4096) + // due to the way that growElements() and goodAllocated() work. + static const uint32_t EagerAllocationMaxLength = + 2048 - ObjectElements::VALUES_PER_HEADER; + + static const JSClass class_; + + bool lengthIsWritable() const { + return !getElementsHeader()->hasNonwritableArrayLength(); + } + + uint32_t length() const { return getElementsHeader()->length; } + + void setNonWritableLength(JSContext* cx) { + shrinkCapacityToInitializedLength(cx); + getElementsHeader()->setNonwritableArrayLength(); + } + + void setLength(uint32_t length) { + MOZ_ASSERT(lengthIsWritable()); + MOZ_ASSERT_IF(length != getElementsHeader()->length, + !denseElementsAreFrozen()); + getElementsHeader()->length = length; + } + + // Try to add a new dense element to this array. 
The array must be extensible. + // + // Returns DenseElementResult::Incomplete if `index >= length`, if the array + // has sparse elements, if we're adding a sparse element, or if the array + // already contains a dense element at this index. + inline DenseElementResult addDenseElementNoLengthChange(JSContext* cx, + uint32_t index, + const Value& val); + + // Make an array object with the specified initial state. + static MOZ_ALWAYS_INLINE ArrayObject* create( + JSContext* cx, gc::AllocKind kind, gc::Heap heap, + Handle shape, uint32_t length, uint32_t slotSpan, + AutoSetNewObjectMetadata& metadata, gc::AllocSite* site = nullptr); +}; + +} // namespace js + +#endif // vm_ArrayObject_h diff --git a/js/src/vm/AsyncFunction.cpp b/js/src/vm/AsyncFunction.cpp new file mode 100644 index 0000000000..7b2482f2a9 --- /dev/null +++ b/js/src/vm/AsyncFunction.cpp @@ -0,0 +1,349 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/AsyncFunction.h" + +#include "mozilla/Maybe.h" + +#include "jsapi.h" + +#include "builtin/ModuleObject.h" +#include "builtin/Promise.h" +#include "vm/FunctionFlags.h" // js::FunctionFlags +#include "vm/GeneratorObject.h" +#include "vm/GlobalObject.h" +#include "vm/Interpreter.h" +#include "vm/Modules.h" +#include "vm/NativeObject.h" +#include "vm/PromiseObject.h" // js::PromiseObject +#include "vm/Realm.h" +#include "vm/SelfHosting.h" + +#include "vm/JSContext-inl.h" +#include "vm/JSObject-inl.h" + +using namespace js; + +using mozilla::Maybe; + +static JSObject* CreateAsyncFunction(JSContext* cx, JSProtoKey key) { + RootedObject proto(cx, &cx->global()->getFunctionConstructor()); + Handle name = cx->names().AsyncFunction; + return NewFunctionWithProto(cx, AsyncFunctionConstructor, 1, + FunctionFlags::NATIVE_CTOR, nullptr, name, proto, + gc::AllocKind::FUNCTION, TenuredObject); +} + +static JSObject* CreateAsyncFunctionPrototype(JSContext* cx, JSProtoKey key) { + return NewTenuredObjectWithFunctionPrototype(cx, cx->global()); +} + +static bool AsyncFunctionClassFinish(JSContext* cx, HandleObject asyncFunction, + HandleObject asyncFunctionProto) { + // Change the "constructor" property to non-writable before adding any other + // properties, so it's still the last property and can be modified without a + // dictionary-mode transition. 
+ MOZ_ASSERT(asyncFunctionProto->as().getLastProperty().key() == + NameToId(cx->names().constructor)); + MOZ_ASSERT(!asyncFunctionProto->as().inDictionaryMode()); + + RootedValue asyncFunctionVal(cx, ObjectValue(*asyncFunction)); + if (!DefineDataProperty(cx, asyncFunctionProto, cx->names().constructor, + asyncFunctionVal, JSPROP_READONLY)) { + return false; + } + MOZ_ASSERT(!asyncFunctionProto->as().inDictionaryMode()); + + return DefineToStringTag(cx, asyncFunctionProto, cx->names().AsyncFunction); +} + +static const ClassSpec AsyncFunctionClassSpec = { + CreateAsyncFunction, + CreateAsyncFunctionPrototype, + nullptr, + nullptr, + nullptr, + nullptr, + AsyncFunctionClassFinish, + ClassSpec::DontDefineConstructor}; + +const JSClass js::AsyncFunctionClass = {"AsyncFunction", 0, JS_NULL_CLASS_OPS, + &AsyncFunctionClassSpec}; + +enum class ResumeKind { Normal, Throw }; + +/** + * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14 + * + * Await in async function + * https://tc39.es/ecma262/#await + * + * Unified implementation of + * + * Step 3. fulfilledClosure Abstract Closure. + * Step 5. rejectedClosure Abstract Closure. + */ +static bool AsyncFunctionResume(JSContext* cx, + Handle generator, + ResumeKind kind, HandleValue valueOrReason) { + // We're enqueuing the promise job for Await before suspending the execution + // of the async function. So when either the debugger or OOM errors terminate + // the execution after JSOp::AsyncAwait, but before JSOp::Await, we're in an + // inconsistent state, because we don't have a resume index set and therefore + // don't know where to resume the async function. Return here in that case. + if (generator->isClosed()) { + return true; + } + + // The debugger sets the async function's generator object into the "running" + // state while firing debugger events to ensure the debugger can't re-enter + // the async function, cf. |AutoSetGeneratorRunning| in Debugger.cpp. 
Catch + // this case here by checking if the generator is already runnning. + if (generator->isRunning()) { + return true; + } + + Rooted resultPromise(cx, generator->promise()); + + RootedObject stack(cx); + Maybe asyncStack; + if (JSObject* allocationSite = resultPromise->allocationSite()) { + // The promise is created within the activation of the async function, so + // use the parent frame as the starting point for async stacks. + stack = allocationSite->as().getParent(); + if (stack) { + asyncStack.emplace( + cx, stack, "async", + JS::AutoSetAsyncStackForNewCalls::AsyncCallKind::EXPLICIT); + } + } + + MOZ_ASSERT(generator->isSuspended(), + "non-suspended generator when resuming async function"); + + // Step {3,5}.a. Let prevContext be the running execution context. + // Step {3,5}.b. Suspend prevContext. + // Step {3,5}.c. Push asyncContext onto the execution context stack; + // asyncContext is now the running execution context. + // + // fulfilledClosure + // Step 3.d. Resume the suspended evaluation of asyncContext using + // NormalCompletion(value) as the result of the operation that + // suspended it. + // + // rejectedClosure + // Step 5.d. Resume the suspended evaluation of asyncContext using + // ThrowCompletion(reason) as the result of the operation that + // suspended it. + // + // Execution context switching is handled in generator. + Handle funName = kind == ResumeKind::Normal + ? cx->names().AsyncFunctionNext + : cx->names().AsyncFunctionThrow; + FixedInvokeArgs<1> args(cx); + args[0].set(valueOrReason); + RootedValue generatorOrValue(cx, ObjectValue(*generator)); + if (!CallSelfHostedFunction(cx, funName, generatorOrValue, args, + &generatorOrValue)) { + if (!generator->isClosed()) { + generator->setClosed(); + } + + // Handle the OOM case mentioned above. 
+ if (resultPromise->state() == JS::PromiseState::Pending && + cx->isExceptionPending()) { + RootedValue exn(cx); + if (!GetAndClearException(cx, &exn)) { + return false; + } + return AsyncFunctionThrown(cx, resultPromise, exn); + } + return false; + } + + // Step {3,f}.e. Assert: When we reach this step, asyncContext has already + // been removed from the execution context stack and + // prevContext is the currently running execution context. + // Step {3,f}.f. Return undefined. + MOZ_ASSERT_IF(generator->isClosed(), generatorOrValue.isObject()); + MOZ_ASSERT_IF(generator->isClosed(), + &generatorOrValue.toObject() == resultPromise); + MOZ_ASSERT_IF(!generator->isClosed(), generator->isAfterAwait()); + + return true; +} + +/** + * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14 + * + * Await in async function + * https://tc39.es/ecma262/#await + * + * Step 3. fulfilledClosure Abstract Closure. + */ +[[nodiscard]] bool js::AsyncFunctionAwaitedFulfilled( + JSContext* cx, Handle generator, + HandleValue value) { + return AsyncFunctionResume(cx, generator, ResumeKind::Normal, value); +} + +/** + * ES2022 draft rev d03c1ec6e235a5180fa772b6178727c17974cb14 + * + * Await in async function + * https://tc39.es/ecma262/#await + * + * Step 5. rejectedClosure Abstract Closure. 
+ */ +[[nodiscard]] bool js::AsyncFunctionAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason) { + return AsyncFunctionResume(cx, generator, ResumeKind::Throw, reason); +} + +JSObject* js::AsyncFunctionResolve( + JSContext* cx, Handle generator, + HandleValue valueOrReason, AsyncFunctionResolveKind resolveKind) { + Rooted promise(cx, generator->promise()); + if (resolveKind == AsyncFunctionResolveKind::Fulfill) { + if (!AsyncFunctionReturned(cx, promise, valueOrReason)) { + return nullptr; + } + } else { + if (!AsyncFunctionThrown(cx, promise, valueOrReason)) { + return nullptr; + } + } + return promise; +} + +const JSClass AsyncFunctionGeneratorObject::class_ = { + "AsyncFunctionGenerator", + JSCLASS_HAS_RESERVED_SLOTS(AsyncFunctionGeneratorObject::RESERVED_SLOTS), + &classOps_, +}; + +const JSClassOps AsyncFunctionGeneratorObject::classOps_ = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + nullptr, // finalize + nullptr, // call + nullptr, // construct + CallTraceMethod, // trace +}; + +AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create( + JSContext* cx, HandleFunction fun) { + MOZ_ASSERT(fun->isAsync() && !fun->isGenerator()); + + Rooted resultPromise(cx, CreatePromiseObjectForAsync(cx)); + if (!resultPromise) { + return nullptr; + } + + auto* obj = NewBuiltinClassInstance(cx); + if (!obj) { + return nullptr; + } + obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise)); + + // Starts in the running state. 
+ obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING); + + return obj; +} + +JSFunction* NewHandler(JSContext* cx, Native handler, + JS::Handle target) { + cx->check(target); + + JS::Handle funName = cx->names().empty; + JS::Rooted handlerFun( + cx, NewNativeFunction(cx, handler, 0, funName, + gc::AllocKind::FUNCTION_EXTENDED, GenericObject)); + if (!handlerFun) { + return nullptr; + } + handlerFun->setExtendedSlot(FunctionExtended::MODULE_SLOT, + JS::ObjectValue(*target)); + return handlerFun; +} + +static bool AsyncModuleExecutionFulfilledHandler(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + JSFunction& func = args.callee().as(); + + Rooted module( + cx, &func.getExtendedSlot(FunctionExtended::MODULE_SLOT) + .toObject() + .as()); + AsyncModuleExecutionFulfilled(cx, module); + args.rval().setUndefined(); + return true; +} + +static bool AsyncModuleExecutionRejectedHandler(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + JSFunction& func = args.callee().as(); + Rooted module( + cx, &func.getExtendedSlot(FunctionExtended::MODULE_SLOT) + .toObject() + .as()); + AsyncModuleExecutionRejected(cx, module, args.get(0)); + args.rval().setUndefined(); + return true; +} + +AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create( + JSContext* cx, Handle module) { + // TODO: Module is currently hitching a ride with + // AsyncFunctionGeneratorObject. The reason for this is we have some work in + // the JITs that make use of this object when we hit AsyncAwait bytecode. At + // the same time, top level await shares a lot of it's implementation with + // AsyncFunction. I am not sure if the best thing to do here is inherit, + // override, or do something else. Comments appreciated. 
+ MOZ_ASSERT(module->script()->isAsync()); + + Rooted resultPromise(cx, CreatePromiseObjectForAsync(cx)); + if (!resultPromise) { + return nullptr; + } + + Rooted obj( + cx, NewBuiltinClassInstance(cx)); + if (!obj) { + return nullptr; + } + obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise)); + + RootedObject onFulfilled( + cx, NewHandler(cx, AsyncModuleExecutionFulfilledHandler, module)); + if (!onFulfilled) { + return nullptr; + } + + RootedObject onRejected( + cx, NewHandler(cx, AsyncModuleExecutionRejectedHandler, module)); + if (!onRejected) { + return nullptr; + } + + if (!JS::AddPromiseReactionsIgnoringUnhandledRejection( + cx, resultPromise, onFulfilled, onRejected)) { + return nullptr; + } + + // Starts in the running state. + obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING); + + return obj; +} diff --git a/js/src/vm/AsyncFunction.h b/js/src/vm/AsyncFunction.h new file mode 100644 index 0000000000..dbcfa2aec5 --- /dev/null +++ b/js/src/vm/AsyncFunction.h @@ -0,0 +1,324 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_AsyncFunction_h +#define vm_AsyncFunction_h + +#include "js/Class.h" +#include "vm/AsyncFunctionResolveKind.h" // AsyncFunctionResolveKind +#include "vm/GeneratorObject.h" +#include "vm/JSObject.h" +#include "vm/PromiseObject.h" + +// [SMDOC] Async functions +// +// # Implementation +// +// Async functions are implemented based on generators, in terms of +// suspend/resume. +// Instead of returning the generator object itself, they return the async +// function's result promise to the caller. 
+// +// The async function's result promise is stored in the generator object +// (js::AsyncFunctionGeneratorObject) and retrieved from it whenever the +// execution needs it. +// +// +// # Start +// +// When an async function is called, it synchronously runs until the first +// `await` or `return`. This works just like a normal function. +// +// This corresponds to steps 1-3, 5-9 of AsyncFunctionStart. +// +// AsyncFunctionStart ( promiseCapability, asyncFunctionBody ) +// https://tc39.es/ecma262/#sec-async-functions-abstract-operations-async-function-start +// +// 1. Let runningContext be the running execution context. +// 2. Let asyncContext be a copy of runningContext. +// 3. NOTE: Copying the execution state is required for the step below to +// resume its execution. It is ill-defined to resume a currently executing +// context. +// ... +// 5. Push asyncContext onto the execution context stack; asyncContext is now +// the running execution context. +// 6. Resume the suspended evaluation of asyncContext. Let result be the value +// returned by the resumed computation. +// 7. Assert: When we return here, asyncContext has already been removed from +// the execution context stack and runningContext is the currently running +// execution context. +// 8. Assert: result is a normal completion with a value of undefined. The +// possible sources of completion values are Await or, if the async +// function doesn't await anything, step 4.g above. +// 9. Return. +// +// Unlike generators, async functions don't contain JSOp::InitialYield and +// don't suspend immediately when call. +// +// +// # Return +// +// Explicit/implicit `return` is implemented with the following bytecode +// sequence: +// +// ``` +// GetAliasedVar ".generator" # VALUE .generator +// AsyncResolve 0 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// JSOp::Resolve (js::AsyncFunctionResolve) resolves the current async +// function's result promise. 
Then this sets it as the function's return value. +// (The return value is observable if the caller is still on the stack-- +// that is, the async function is returning without ever awaiting. +// Otherwise we're returning to the microtask loop, which ignores the +// return value.) +// +// This corresponds to AsyncFunctionStart steps 4.a-e. 4.g. +// +// 4. Set the code evaluation state of asyncContext such that when evaluation +// is resumed for that execution context the following steps will be +// performed: +// a. Let result be the result of evaluating asyncFunctionBody. +// b. Assert: If we return here, the async function either threw an +// exception or performed an implicit or explicit return; all awaiting +// is done. +// c. Remove asyncContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// d. If result.[[Type]] is normal, then +// i. Perform +// ! Call(promiseCapability.[[Resolve]], undefined, «undefined»). +// e. Else if result.[[Type]] is return, then +// i. Perform +// ! Call(promiseCapability.[[Resolve]], undefined, +// «result.[[Value]]»). +// ... +// g. Return. +// +// +// # Throw +// +// The body part of an async function is enclosed by an implicit try-catch +// block, to catch `throw` completion of the function body. +// +// If an exception is thrown by the function body, the catch block catches it +// and rejects the async function's result promise. +// +// If there's an expression in parameters, the entire parameters part is also +// enclosed by a separate implicit try-catch block. 
+// +// ``` +// Try # +// (parameter expressions here) # +// Goto BODY # +// +// JumpTarget from try # +// Exception # EXCEPTION +// GetAliasedVar ".generator" # EXCEPTION .generator +// AsyncResolve 1 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// +// BODY: +// JumpTarget # +// Try # +// (body here) # +// +// JumpTarget from try # +// Exception # EXCEPTION +// GetAliasedVar ".generator" # EXCEPTION .generator +// AsyncResolve 1 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// This corresponds to AsyncFunctionStart steps 4.f-g. +// +// 4. ... +// f. Else, +// i. Assert: result.[[Type]] is throw. +// ii. Perform +// ! Call(promiseCapability.[[Reject]], undefined, +// «result.[[Value]]»). +// g. Return. +// +// +// # Await +// +// `await` is implemented with the following bytecode sequence: +// (ignoring CanSkipAwait for now, see "Optimization for await" section) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// AsyncAwait # PROMISE +// +// GetAliasedVar ".generator" # PROMISE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// ``` +// +// JSOp::AsyncAwait corresponds to Await steps 1-9, and JSOp::Await corresponds +// to Await steps 10-12 in the spec. +// +// See the next section for JSOp::CheckResumeKind. +// +// After them, the async function is suspended, and if this is the first await +// in the execution, the async function's result promise is returned to the +// caller. +// +// Await +// https://tc39.es/ecma262/#await +// +// 1. Let asyncContext be the running execution context. +// 2. Let promise be ? PromiseResolve(%Promise%, value). +// 3. Let stepsFulfilled be the algorithm steps defined in Await Fulfilled +// Functions. +// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, « +// [[AsyncContext]] »). +// 5. 
Set onFulfilled.[[AsyncContext]] to asyncContext. +// 6. Let stepsRejected be the algorithm steps defined in Await Rejected +// Functions. +// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, « +// [[AsyncContext]] »). +// 8. Set onRejected.[[AsyncContext]] to asyncContext. +// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected). +// 10. Remove asyncContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// 11. Set the code evaluation state of asyncContext such that when evaluation +// is resumed with a Completion completion, the following steps of the +// algorithm that invoked Await will be performed, with completion +// available. +// 12. Return. +// 13. NOTE: This returns to the evaluation of the operation that had most +// previously resumed evaluation of asyncContext. +// +// (See comments above AsyncAwait and Await in js/src/vm/Opcodes.h for more +// details) +// +// +// # Reaction jobs and resume after await +// +// When an async function performs `await` and the operand becomes settled, a +// new reaction job for the operand is enqueued to the job queue. +// +// The reaction record for the job is marked as "this is for async function" +// (see js::AsyncFunctionAwait), and handled specially in +// js::PromiseReactionJob. +// +// When the await operand resolves (either with fulfillment or rejection), +// the async function is resumed from the job queue, by calling +// js::AsyncFunctionAwaitedFulfilled or js::AsyncFunctionAwaitedRejected +// from js::AsyncFunctionPromiseReactionJob. +// +// The execution resumes from JSOp::AfterYield, with the resolved value +// and the resume kind, either normal or throw, corresponds to fulfillment or +// rejection, on the stack. +// +// The resume kind is handled by JSOp::CheckResumeKind after that. 
+// +// If the resume kind is normal (=fulfillment), the async function resumes +// the execution with the resolved value as the result of `await`. +// +// If the resume kind is throw (=rejection), it throws the resolved value, +// and it will be caught by the try-catch explained above. +// +// +// # Optimization for await +// +// Suspending the execution and going into the embedding's job queue is slow +// and hard to optimize. +// +// If the following conditions are met, we don't have to perform the above +// but just use the await operand as the result of await. +// +// 1. The await operand is either non-promise or already-fulfilled promise, +// so that the result value is already known +// 2. There's no jobs in the job queue, +// so that we don't have to perform other jobs before resuming from +// await +// 3. Promise constructor/prototype are not modified, +// so that the optimization isn't visible to the user code +// +// This is implemented by the following bytecode sequence: +// +// ``` +// (operand here) # VALUE +// +// CanSkipAwait # VALUE, CAN_SKIP +// MaybeExtractAwaitValue # VALUE_OR_RVAL, CAN_SKIP +// JumpIfTrue END # VALUE +// +// JumpTarget # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// END: +// JumpTarget # RVAL +// ``` +// +// JSOp::CanSkipAwait checks the above conditions. MaybeExtractAwaitValue will +// replace Value if it can be skipped, and then the await is jumped over. + +namespace js { + +class AsyncFunctionGeneratorObject; + +extern const JSClass AsyncFunctionClass; + +// Resume the async function when the `await` operand resolves. +// Split into two functions depending on whether the awaited value was +// fulfilled or rejected. 
+[[nodiscard]] bool AsyncFunctionAwaitedFulfilled( + JSContext* cx, Handle generator, + HandleValue value); + +[[nodiscard]] bool AsyncFunctionAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason); + +// Resolve the async function's promise object with the given value and then +// return the promise object. +JSObject* AsyncFunctionResolve(JSContext* cx, + Handle generator, + HandleValue valueOrReason, + AsyncFunctionResolveKind resolveKind); + +class AsyncFunctionGeneratorObject : public AbstractGeneratorObject { + public: + enum { + PROMISE_SLOT = AbstractGeneratorObject::RESERVED_SLOTS, + + RESERVED_SLOTS + }; + + static const JSClass class_; + static const JSClassOps classOps_; + + static AsyncFunctionGeneratorObject* create(JSContext* cx, + HandleFunction asyncGen); + + static AsyncFunctionGeneratorObject* create(JSContext* cx, + Handle module); + + PromiseObject* promise() { + return &getFixedSlot(PROMISE_SLOT).toObject().as(); + } +}; + +} // namespace js + +#endif /* vm_AsyncFunction_h */ diff --git a/js/src/vm/AsyncFunctionResolveKind.h b/js/src/vm/AsyncFunctionResolveKind.h new file mode 100644 index 0000000000..75adfcec3c --- /dev/null +++ b/js/src/vm/AsyncFunctionResolveKind.h @@ -0,0 +1,18 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_AsyncFunctionResolveKind_h +#define vm_AsyncFunctionResolveKind_h + +#include // uint8_t + +namespace js { + +enum class AsyncFunctionResolveKind : uint8_t { Fulfill, Reject }; + +} // namespace js + +#endif /* vm_AsyncFunctionResolveKind_h */ diff --git a/js/src/vm/AsyncIteration.cpp b/js/src/vm/AsyncIteration.cpp new file mode 100644 index 0000000000..293fc8c31c --- /dev/null +++ b/js/src/vm/AsyncIteration.cpp @@ -0,0 +1,1484 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/AsyncIteration.h" + +#include "builtin/Promise.h" // js::PromiseHandler, js::CreatePromiseObjectForAsyncGenerator, js::AsyncFromSyncIteratorMethod, js::ResolvePromiseInternal, js::RejectPromiseInternal, js::InternalAsyncGeneratorAwait +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/PropertySpec.h" +#include "vm/CompletionKind.h" +#include "vm/FunctionFlags.h" // js::FunctionFlags +#include "vm/GeneratorObject.h" +#include "vm/GlobalObject.h" +#include "vm/Interpreter.h" +#include "vm/PlainObject.h" // js::PlainObject +#include "vm/PromiseObject.h" // js::PromiseObject +#include "vm/Realm.h" +#include "vm/SelfHosting.h" +#include "vm/WellKnownAtom.h" // js_*_str + +#include "vm/JSObject-inl.h" +#include "vm/List-inl.h" + +using namespace js; + +// --------------- +// Async generator +// --------------- + +const JSClass AsyncGeneratorObject::class_ = { + "AsyncGenerator", + JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorObject::Slots), + &classOps_, +}; + +const JSClassOps AsyncGeneratorObject::classOps_ = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + 
nullptr, // finalize + nullptr, // call + nullptr, // construct + CallTraceMethod, // trace +}; + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// OrdinaryCreateFromConstructor ( constructor, intrinsicDefaultProto +// [ , internalSlotsList ] ) +// https://tc39.es/ecma262/#sec-ordinarycreatefromconstructor +// +// specialized for AsyncGeneratorObjects. +static AsyncGeneratorObject* OrdinaryCreateFromConstructorAsynGen( + JSContext* cx, HandleFunction constructor) { + // Step 1: Assert... + // (implicit) + + // Step 2. Let proto be + // ? GetPrototypeFromConstructor(constructor, intrinsicDefaultProto). + RootedValue protoVal(cx); + if (!GetProperty(cx, constructor, constructor, cx->names().prototype, + &protoVal)) { + return nullptr; + } + + RootedObject proto(cx, protoVal.isObject() ? &protoVal.toObject() : nullptr); + if (!proto) { + proto = GlobalObject::getOrCreateAsyncGeneratorPrototype(cx, cx->global()); + if (!proto) { + return nullptr; + } + } + + // Step 3. Return ! OrdinaryObjectCreate(proto, internalSlotsList). + return NewObjectWithGivenProto(cx, proto); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorStart ( generator, generatorBody ) +// https://tc39.es/ecma262/#sec-asyncgeneratorstart +// +// Steps 6-7. +/* static */ +AsyncGeneratorObject* AsyncGeneratorObject::create(JSContext* cx, + HandleFunction asyncGen) { + MOZ_ASSERT(asyncGen->isAsync() && asyncGen->isGenerator()); + + AsyncGeneratorObject* generator = + OrdinaryCreateFromConstructorAsynGen(cx, asyncGen); + if (!generator) { + return nullptr; + } + + // Step 6. Set generator.[[AsyncGeneratorState]] to suspendedStart. + generator->setSuspendedStart(); + + // Step 7. Set generator.[[AsyncGeneratorQueue]] to a new empty List. 
+ generator->clearSingleQueueRequest(); + + generator->clearCachedRequest(); + + return generator; +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::createRequest( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue completionValue, + Handle promise) { + if (!generator->hasCachedRequest()) { + return AsyncGeneratorRequest::create(cx, completionKind, completionValue, + promise); + } + + AsyncGeneratorRequest* request = generator->takeCachedRequest(); + request->init(completionKind, completionValue, promise); + return request; +} + +/* static */ [[nodiscard]] bool AsyncGeneratorObject::enqueueRequest( + JSContext* cx, Handle generator, + Handle request) { + if (generator->isSingleQueue()) { + if (generator->isSingleQueueEmpty()) { + generator->setSingleQueueRequest(request); + return true; + } + + Rooted queue(cx, ListObject::create(cx)); + if (!queue) { + return false; + } + + RootedValue requestVal(cx, ObjectValue(*generator->singleQueueRequest())); + if (!queue->append(cx, requestVal)) { + return false; + } + requestVal = ObjectValue(*request); + if (!queue->append(cx, requestVal)) { + return false; + } + + generator->setQueue(queue); + return true; + } + + Rooted queue(cx, generator->queue()); + RootedValue requestVal(cx, ObjectValue(*request)); + return queue->append(cx, requestVal); +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::dequeueRequest( + JSContext* cx, Handle generator) { + if (generator->isSingleQueue()) { + AsyncGeneratorRequest* request = generator->singleQueueRequest(); + generator->clearSingleQueueRequest(); + return request; + } + + Rooted queue(cx, generator->queue()); + return &queue->popFirstAs(cx); +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::peekRequest( + Handle generator) { + if (generator->isSingleQueue()) { + return generator->singleQueueRequest(); + } + + return &generator->queue()->getAs(0); +} + +const JSClass AsyncGeneratorRequest::class_ = { + 
"AsyncGeneratorRequest", + JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorRequest::Slots)}; + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorRequest Records +// https://tc39.es/ecma262/#sec-asyncgeneratorrequest-records +/* static */ +AsyncGeneratorRequest* AsyncGeneratorRequest::create( + JSContext* cx, CompletionKind completionKind, HandleValue completionValue, + Handle promise) { + AsyncGeneratorRequest* request = + NewObjectWithGivenProto(cx, nullptr); + if (!request) { + return nullptr; + } + + request->init(completionKind, completionValue, promise); + return request; +} + +[[nodiscard]] static bool AsyncGeneratorResume( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue argument); + +[[nodiscard]] static bool AsyncGeneratorDrainQueue( + JSContext* cx, Handle generator); + +[[nodiscard]] static bool AsyncGeneratorCompleteStepNormal( + JSContext* cx, Handle generator, HandleValue value, + bool done); + +[[nodiscard]] static bool AsyncGeneratorCompleteStepThrow( + JSContext* cx, Handle generator, + HandleValue exception); + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorStart ( generator, generatorBody ) +// https://tc39.es/ecma262/#sec-asyncgeneratorstart +// +// Steps 4.e-j. "return" case. +[[nodiscard]] static bool AsyncGeneratorReturned( + JSContext* cx, Handle generator, HandleValue value) { + // Step 4.e. Set generator.[[AsyncGeneratorState]] to completed. + generator->setCompleted(); + + // Step 4.g. If result.[[Type]] is return, set result to + // NormalCompletion(result.[[Value]]). + // (implicit) + + // Step 4.h. Perform ! AsyncGeneratorCompleteStep(generator, result, true). + if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, true)) { + return false; + } + + // Step 4.i. Perform ! AsyncGeneratorDrainQueue(generator). + // Step 4.j. Return undefined. 
+ return AsyncGeneratorDrainQueue(cx, generator); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorStart ( generator, generatorBody ) +// https://tc39.es/ecma262/#sec-asyncgeneratorstart +// +// Steps 4.e-j. "throw" case. +[[nodiscard]] static bool AsyncGeneratorThrown( + JSContext* cx, Handle generator) { + // Step 4.e. Set generator.[[AsyncGeneratorState]] to completed. + generator->setCompleted(); + + // Not much we can do about uncatchable exceptions, so just bail. + if (!cx->isExceptionPending()) { + return false; + } + + // Step 4.h. Perform ! AsyncGeneratorCompleteStep(generator, result, true). + RootedValue value(cx); + if (!GetAndClearException(cx, &value)) { + return false; + } + if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) { + return false; + } + + // Step 4.i. Perform ! AsyncGeneratorDrainQueue(generator). + // Step 4.j. Return undefined. + return AsyncGeneratorDrainQueue(cx, generator); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorUnwrapYieldResumption ( resumptionValue ) +// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption +// +// Steps 4-5. +[[nodiscard]] static bool AsyncGeneratorYieldReturnAwaitedFulfilled( + JSContext* cx, Handle generator, HandleValue value) { + MOZ_ASSERT(generator->isAwaitingYieldReturn(), + "YieldReturn-Await fulfilled when not in " + "'AwaitingYieldReturn' state"); + + // Step 4. Assert: awaited.[[Type]] is normal. + // Step 5. Return Completion { [[Type]]: return, [[Value]]: + // awaited.[[Value]], [[Target]]: empty }. + return AsyncGeneratorResume(cx, generator, CompletionKind::Return, value); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorUnwrapYieldResumption ( resumptionValue ) +// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption +// +// Step 3. 
+[[nodiscard]] static bool AsyncGeneratorYieldReturnAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason) { + MOZ_ASSERT( + generator->isAwaitingYieldReturn(), + "YieldReturn-Await rejected when not in 'AwaitingYieldReturn' state"); + + // Step 3. If awaited.[[Type]] is throw, return Completion(awaited). + return AsyncGeneratorResume(cx, generator, CompletionKind::Throw, reason); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorUnwrapYieldResumption ( resumptionValue ) +// https://tc39.es/ecma262/#sec-asyncgeneratorunwrapyieldresumption +// +// Steps 1-2. +[[nodiscard]] static bool AsyncGeneratorUnwrapYieldResumptionAndResume( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue resumptionValue) { + // Step 1. If resumptionValue.[[Type]] is not return, return + // Completion(resumptionValue). + if (completionKind != CompletionKind::Return) { + return AsyncGeneratorResume(cx, generator, completionKind, resumptionValue); + } + + // Step 2. Let awaited be Await(resumptionValue.[[Value]]). + // + // Since we don't have the place that handles return from yield + // inside the generator, handle the case here, with extra state + // State_AwaitingYieldReturn. + generator->setAwaitingYieldReturn(); + + const PromiseHandler onFulfilled = + PromiseHandler::AsyncGeneratorYieldReturnAwaitedFulfilled; + const PromiseHandler onRejected = + PromiseHandler::AsyncGeneratorYieldReturnAwaitedRejected; + + return InternalAsyncGeneratorAwait(cx, generator, resumptionValue, + onFulfilled, onRejected); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// Stesp 10-13. +[[nodiscard]] static bool AsyncGeneratorYield( + JSContext* cx, Handle generator, HandleValue value) { + // Step 10. Perform + // ! AsyncGeneratorCompleteStep(generator, completion, false, + // previousRealm). 
+ if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, false)) { + return false; + } + + // Step 11. Let queue be generator.[[AsyncGeneratorQueue]]. + // Step 12. If queue is not empty, then + // Step 13. Else, + // (reordered) + if (generator->isQueueEmpty()) { + // Step 13.a. Set generator.[[AsyncGeneratorState]] to suspendedYield. + generator->setSuspendedYield(); + + // Steps 13.b-c are done in caller. + + // Step 13.d. Return undefined. + return true; + } + + // Step 12. If queue is not empty, then + // Step 12.a. NOTE: Execution continues without suspending the generator. + + // Step 12.b. Let toYield be the first element of queue. + Rooted toYield( + cx, AsyncGeneratorObject::peekRequest(generator)); + if (!toYield) { + return false; + } + + // Step 12.c. Let resumptionValue be toYield.[[Completion]]. + CompletionKind completionKind = toYield->completionKind(); + RootedValue resumptionValue(cx, toYield->completionValue()); + + // Step 12.d. Return AsyncGeneratorUnwrapYieldResumption(resumptionValue). + return AsyncGeneratorUnwrapYieldResumptionAndResume( + cx, generator, completionKind, resumptionValue); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// Await in async function +// https://tc39.es/ecma262/#await +// +// Steps 3.c-f. +[[nodiscard]] static bool AsyncGeneratorAwaitedFulfilled( + JSContext* cx, Handle generator, HandleValue value) { + MOZ_ASSERT(generator->isExecuting(), + "Await fulfilled when not in 'Executing' state"); + + // Step 3.c. Push asyncContext onto the execution context stack; asyncContext + // is now the running execution context. + // Step 3.d. Resume the suspended evaluation of asyncContext using + // NormalCompletion(value) as the result of the operation that + // suspended it. + // Step 3.f. Return undefined. 
+ return AsyncGeneratorResume(cx, generator, CompletionKind::Normal, value); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// Await in async function +// https://tc39.es/ecma262/#await +// +// Steps 5.c-f. +[[nodiscard]] static bool AsyncGeneratorAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason) { + MOZ_ASSERT(generator->isExecuting(), + "Await rejected when not in 'Executing' state"); + + // Step 5.c. Push asyncContext onto the execution context stack; asyncContext + // is now the running execution context. + // Step 5.d. Resume the suspended evaluation of asyncContext using + // ThrowCompletion(reason) as the result of the operation that + // suspended it. + // Step 5.f. Return undefined. + return AsyncGeneratorResume(cx, generator, CompletionKind::Throw, reason); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// Await in async function +// https://tc39.es/ecma262/#await +[[nodiscard]] static bool AsyncGeneratorAwait( + JSContext* cx, Handle generator, HandleValue value) { + return InternalAsyncGeneratorAwait( + cx, generator, value, PromiseHandler::AsyncGeneratorAwaitedFulfilled, + PromiseHandler::AsyncGeneratorAwaitedRejected); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorCompleteStep ( generator, completion, done [ , realm ] ) +// https://tc39.es/ecma262/#sec-asyncgeneratorcompletestep +// +// "normal" case. +[[nodiscard]] static bool AsyncGeneratorCompleteStepNormal( + JSContext* cx, Handle generator, HandleValue value, + bool done) { + // Step 1. Let queue be generator.[[AsyncGeneratorQueue]]. + // Step 2. Assert: queue is not empty. + MOZ_ASSERT(!generator->isQueueEmpty()); + + // Step 3. Let next be the first element of queue. + // Step 4. Remove the first element from queue. + AsyncGeneratorRequest* next = + AsyncGeneratorObject::dequeueRequest(cx, generator); + if (!next) { + return false; + } + + // Step 5. 
Let promiseCapability be next.[[Capability]]. + Rooted resultPromise(cx, next->promise()); + + generator->cacheRequest(next); + + // Step 6. Let value be completion.[[Value]]. + // (passed by caller) + + // Step 7. If completion.[[Type]] is throw, then + // Step 8. Else, + // Step 8.a. Assert: completion.[[Type]] is normal. + + // Step 8.b. If realm is present, then + // (skipped) + // Step 8.c. Else, + + // Step 8.c.i. Let iteratorResult be ! CreateIterResultObject(value, done). + JSObject* resultObj = CreateIterResultObject(cx, value, done); + if (!resultObj) { + return false; + } + + // Step 8.d. Perform + // ! Call(promiseCapability.[[Resolve]], undefined, + // « iteratorResult »). + RootedValue resultValue(cx, ObjectValue(*resultObj)); + return ResolvePromiseInternal(cx, resultPromise, resultValue); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorCompleteStep ( generator, completion, done [ , realm ] ) +// https://tc39.es/ecma262/#sec-asyncgeneratorcompletestep +// +// "throw" case. +[[nodiscard]] static bool AsyncGeneratorCompleteStepThrow( + JSContext* cx, Handle generator, + HandleValue exception) { + // Step 1. Let queue be generator.[[AsyncGeneratorQueue]]. + // Step 2. Assert: queue is not empty. + MOZ_ASSERT(!generator->isQueueEmpty()); + + // Step 3. Let next be the first element of queue. + // Step 4. Remove the first element from queue. + AsyncGeneratorRequest* next = + AsyncGeneratorObject::dequeueRequest(cx, generator); + if (!next) { + return false; + } + + // Step 5. Let promiseCapability be next.[[Capability]]. + Rooted resultPromise(cx, next->promise()); + + generator->cacheRequest(next); + + // Step 6. Let value be completion.[[Value]]. + // (passed by caller) + + // Step 7. If completion.[[Type]] is throw, then + // Step 7.a. Perform + // ! Call(promiseCapability.[[Reject]], undefined, « value »). 
+ return RejectPromiseInternal(cx, resultPromise, exception); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorAwaitReturn ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn +// +// Steps 7.a-e. +[[nodiscard]] static bool AsyncGeneratorAwaitReturnFulfilled( + JSContext* cx, Handle generator, HandleValue value) { + MOZ_ASSERT(generator->isAwaitingReturn(), + "AsyncGeneratorResumeNext-Return fulfilled when not in " + "'AwaitingReturn' state"); + + // Step 7.a. Set generator.[[AsyncGeneratorState]] to completed. + generator->setCompleted(); + + // Step 7.b. Let result be NormalCompletion(value). + // Step 7.c. Perform ! AsyncGeneratorCompleteStep(generator, result, true). + if (!AsyncGeneratorCompleteStepNormal(cx, generator, value, true)) { + return false; + } + + // Step 7.d. Perform ! AsyncGeneratorDrainQueue(generator). + // Step 7.e. Return undefined. + return AsyncGeneratorDrainQueue(cx, generator); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorAwaitReturn ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn +// +// Steps 9.a-e. +[[nodiscard]] static bool AsyncGeneratorAwaitReturnRejected( + JSContext* cx, Handle generator, HandleValue value) { + MOZ_ASSERT(generator->isAwaitingReturn(), + "AsyncGeneratorResumeNext-Return rejected when not in " + "'AwaitingReturn' state"); + + // Step 9.a. Set generator.[[AsyncGeneratorState]] to completed. + generator->setCompleted(); + + // Step 9.b. Let result be ThrowCompletion(reason). + // Step 9.c. Perform ! AsyncGeneratorCompleteStep(generator, result, true). + if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) { + return false; + } + + // Step 9.d. Perform ! AsyncGeneratorDrainQueue(generator). + // Step 9.e. Return undefined. 
+ return AsyncGeneratorDrainQueue(cx, generator); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorAwaitReturn ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratorawaitreturn +[[nodiscard]] static bool AsyncGeneratorAwaitReturn( + JSContext* cx, Handle generator, HandleValue next) { + // Step 1. Let queue be generator.[[AsyncGeneratorQueue]]. + // Step 2. Assert: queue is not empty. + MOZ_ASSERT(!generator->isQueueEmpty()); + + // Step 3. Let next be the first element of queue. + // (passed by caller) + + // Step 4. Let completion be next.[[Completion]]. + // Step 5. Assert: completion.[[Type]] is return. + // (implicit) + + // Steps 6-11. + return InternalAsyncGeneratorAwait( + cx, generator, next, PromiseHandler::AsyncGeneratorAwaitReturnFulfilled, + PromiseHandler::AsyncGeneratorAwaitReturnRejected); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorDrainQueue ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratordrainqueue +[[nodiscard]] static bool AsyncGeneratorDrainQueue( + JSContext* cx, Handle generator) { + // Step 1. Assert: generator.[[AsyncGeneratorState]] is completed. + MOZ_ASSERT(generator->isCompleted()); + + // Step 2. Let queue be generator.[[AsyncGeneratorQueue]]. + // Step 3. If queue is empty, return. + if (generator->isQueueEmpty()) { + return true; + } + + // Step 4. Let done be false. + // (implicit) + + // Step 5. Repeat, while done is false, + while (true) { + // Step 5.a. Let next be the first element of queue. + Rooted next( + cx, AsyncGeneratorObject::peekRequest(generator)); + if (!next) { + return false; + } + + // Step 5.b. Let completion be next.[[Completion]]. + CompletionKind completionKind = next->completionKind(); + + // Step 5.c. If completion.[[Type]] is return, then + if (completionKind == CompletionKind::Return) { + RootedValue value(cx, next->completionValue()); + + // Step 5.c.i. 
Set generator.[[AsyncGeneratorState]] to awaiting-return. + generator->setAwaitingReturn(); + + // Step 5.c.ii. Perform ! AsyncGeneratorAwaitReturn(generator). + // Step 5.c.iii. Set done to true. + return AsyncGeneratorAwaitReturn(cx, generator, value); + } + + // Step 5.d. Else, + if (completionKind == CompletionKind::Throw) { + RootedValue value(cx, next->completionValue()); + + // Step 5.d.ii. Perform + // ! AsyncGeneratorCompleteStep(generator, completion, true). + if (!AsyncGeneratorCompleteStepThrow(cx, generator, value)) { + return false; + } + } else { + // Step 5.d.i. If completion.[[Type]] is normal, then + // Step 5.d.i.1. Set completion to NormalCompletion(undefined). + // Step 5.d.ii. Perform + // ! AsyncGeneratorCompleteStep(generator, completion, true). + if (!AsyncGeneratorCompleteStepNormal(cx, generator, UndefinedHandleValue, + true)) { + return false; + } + } + + // Step 5.d.iii. If queue is empty, set done to true. + if (generator->isQueueEmpty()) { + return true; + } + } +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorValidate ( generator, generatorBrand ) +// https://tc39.es/ecma262/#sec-asyncgeneratorvalidate +// +// Testing part. +[[nodiscard]] static bool IsAsyncGeneratorValid(HandleValue asyncGenVal) { + // Step 1. Perform + // ? RequireInternalSlot(generator, [[AsyncGeneratorContext]]). + // Step 2. Perform + // ? RequireInternalSlot(generator, [[AsyncGeneratorState]]). + // Step 3. Perform + // ? RequireInternalSlot(generator, [[AsyncGeneratorQueue]]). + // Step 4. If generator.[[GeneratorBrand]] is not the same value as + // generatorBrand, throw a TypeError exception. + return asyncGenVal.isObject() && + asyncGenVal.toObject().canUnwrapAs(); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorValidate ( generator, generatorBrand ) +// https://tc39.es/ecma262/#sec-asyncgeneratorvalidate +// +// Throwing part. 
+[[nodiscard]] static bool AsyncGeneratorValidateThrow( + JSContext* cx, MutableHandleValue result) { + Rooted resultPromise( + cx, CreatePromiseObjectForAsyncGenerator(cx)); + if (!resultPromise) { + return false; + } + + RootedValue badGeneratorError(cx); + if (!GetTypeError(cx, JSMSG_NOT_AN_ASYNC_GENERATOR, &badGeneratorError)) { + return false; + } + + if (!RejectPromiseInternal(cx, resultPromise, badGeneratorError)) { + return false; + } + + result.setObject(*resultPromise); + return true; +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorEnqueue ( generator, completion, promiseCapability ) +// https://tc39.es/ecma262/#sec-asyncgeneratorenqueue +[[nodiscard]] static bool AsyncGeneratorEnqueue( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue completionValue, + Handle resultPromise) { + // Step 1. Let request be + // AsyncGeneratorRequest { [[Completion]]: completion, + // [[Capability]]: promiseCapability }. + Rooted request( + cx, AsyncGeneratorObject::createRequest(cx, generator, completionKind, + completionValue, resultPromise)); + if (!request) { + return false; + } + + // Step 2. Append request to the end of generator.[[AsyncGeneratorQueue]]. + return AsyncGeneratorObject::enqueueRequest(cx, generator, request); +} + +class MOZ_STACK_CLASS MaybeEnterAsyncGeneratorRealm { + mozilla::Maybe ar_; + + public: + MaybeEnterAsyncGeneratorRealm() = default; + ~MaybeEnterAsyncGeneratorRealm() = default; + + // Enter async generator's realm, and wrap the method's argument value if + // necessary. + [[nodiscard]] bool maybeEnterAndWrap(JSContext* cx, + Handle generator, + MutableHandleValue value) { + if (generator->compartment() == cx->compartment()) { + return true; + } + + ar_.emplace(cx, generator); + return cx->compartment()->wrap(cx, value); + } + + // Leave async generator's realm, and wrap the method's result value if + // necessary. 
+ [[nodiscard]] bool maybeLeaveAndWrap(JSContext* cx, + MutableHandleValue result) { + if (!ar_) { + return true; + } + ar_.reset(); + + return cx->compartment()->wrap(cx, result); + } +}; + +[[nodiscard]] static bool AsyncGeneratorMethodSanityCheck( + JSContext* cx, Handle generator) { + if (generator->isCompleted() || generator->isSuspendedStart() || + generator->isSuspendedYield()) { + // The spec assumes the queue is empty when async generator methods are + // called with those state, but our debugger allows calling those methods + // in unexpected state, such as before suspendedStart. + if (MOZ_UNLIKELY(!generator->isQueueEmpty())) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_SUSPENDED_QUEUE_NOT_EMPTY); + return false; + } + } + + return true; +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGenerator.prototype.next ( value ) +// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-next +bool js::AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 3. Let result be AsyncGeneratorValidate(generator, empty). + // Step 4. IfAbruptRejectPromise(result, promiseCapability). + // (reordered) + if (!IsAsyncGeneratorValid(args.thisv())) { + return AsyncGeneratorValidateThrow(cx, args.rval()); + } + + // Step 1. Let generator be the this value. + // (implicit) + Rooted generator( + cx, &args.thisv().toObject().unwrapAs()); + + MaybeEnterAsyncGeneratorRealm maybeEnterRealm; + + RootedValue completionValue(cx, args.get(0)); + if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) { + return false; + } + + // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%). + Rooted resultPromise( + cx, CreatePromiseObjectForAsyncGenerator(cx)); + if (!resultPromise) { + return false; + } + + if (!AsyncGeneratorMethodSanityCheck(cx, generator)) { + return false; + } + + // Step 5. Let state be generator.[[AsyncGeneratorState]]. 
+ // Step 6. If state is completed, then + if (generator->isCompleted()) { + // Step 6.a. Let iteratorResult be + // ! CreateIterResultObject(undefined, true). + JSObject* resultObj = + CreateIterResultObject(cx, UndefinedHandleValue, true); + if (!resultObj) { + return false; + } + + // Step 6.b. Perform + // ! Call(promiseCapability.[[Resolve]], undefined, + // « iteratorResult »). + RootedValue resultValue(cx, ObjectValue(*resultObj)); + if (!ResolvePromiseInternal(cx, resultPromise, resultValue)) { + return false; + } + } else { + // Step 7. Let completion be NormalCompletion(value). + // Step 8. Perform + // ! AsyncGeneratorEnqueue(generator, completion, + // promiseCapability). + if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Normal, + completionValue, resultPromise)) { + return false; + } + + // Step 9. If state is either suspendedStart or suspendedYield, then + if (generator->isSuspendedStart() || generator->isSuspendedYield()) { + RootedValue resumptionValue(cx, completionValue); + // Step 9.a. Perform ! AsyncGeneratorResume(generator, completion). + if (!AsyncGeneratorResume(cx, generator, CompletionKind::Normal, + resumptionValue)) { + return false; + } + } else { + // Step 10. Else, + // Step 10.a. Assert: state is either executing or awaiting-return. + MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() || + generator->isAwaitingYieldReturn()); + } + } + + // Step 6.c. Return promiseCapability.[[Promise]]. + // and + // Step 11. Return promiseCapability.[[Promise]]. + args.rval().setObject(*resultPromise); + + return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval()); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGenerator.prototype.return ( value ) +// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-return +bool js::AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 3. 
Let result be AsyncGeneratorValidate(generator, empty). + // Step 4. IfAbruptRejectPromise(result, promiseCapability). + // (reordered) + if (!IsAsyncGeneratorValid(args.thisv())) { + return AsyncGeneratorValidateThrow(cx, args.rval()); + } + + // Step 1. Let generator be the this value. + Rooted generator( + cx, &args.thisv().toObject().unwrapAs()); + + MaybeEnterAsyncGeneratorRealm maybeEnterRealm; + + RootedValue completionValue(cx, args.get(0)); + if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) { + return false; + } + + // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%). + Rooted resultPromise( + cx, CreatePromiseObjectForAsyncGenerator(cx)); + if (!resultPromise) { + return false; + } + + if (!AsyncGeneratorMethodSanityCheck(cx, generator)) { + return false; + } + + // Step 5. Let completion be + // Completion { [[Type]]: return, [[Value]]: value, + // [[Target]]: empty }. + // Step 6. Perform + // ! AsyncGeneratorEnqueue(generator, completion, promiseCapability). + if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Return, + completionValue, resultPromise)) { + return false; + } + + // Step 7. Let state be generator.[[AsyncGeneratorState]]. + // Step 8. If state is either suspendedStart or completed, then + if (generator->isSuspendedStart() || generator->isCompleted()) { + // Step 8.a. Set generator.[[AsyncGeneratorState]] to awaiting-return. + generator->setAwaitingReturn(); + + // Step 8.b. Perform ! AsyncGeneratorAwaitReturn(generator). + if (!AsyncGeneratorAwaitReturn(cx, generator, completionValue)) { + return false; + } + } else if (generator->isSuspendedYield()) { + // Step 9. Else if state is suspendedYield, then + + // Step 9.a. Perform ! AsyncGeneratorResume(generator, completion). + if (!AsyncGeneratorUnwrapYieldResumptionAndResume( + cx, generator, CompletionKind::Return, completionValue)) { + return false; + } + } else { + // Step 10. Else, + // Step 10.a. 
Assert: state is either executing or awaiting-return. + MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() || + generator->isAwaitingYieldReturn()); + } + + // Step 11. Return promiseCapability.[[Promise]]. + args.rval().setObject(*resultPromise); + + return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval()); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGenerator.prototype.throw ( exception ) +// https://tc39.es/ecma262/#sec-asyncgenerator-prototype-throw +bool js::AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 3. Let result be AsyncGeneratorValidate(generator, empty). + // Step 4. IfAbruptRejectPromise(result, promiseCapability). + // (reordered) + if (!IsAsyncGeneratorValid(args.thisv())) { + return AsyncGeneratorValidateThrow(cx, args.rval()); + } + + // Step 1. Let generator be the this value. + Rooted generator( + cx, &args.thisv().toObject().unwrapAs()); + + MaybeEnterAsyncGeneratorRealm maybeEnterRealm; + + RootedValue completionValue(cx, args.get(0)); + if (!maybeEnterRealm.maybeEnterAndWrap(cx, generator, &completionValue)) { + return false; + } + + // Step 2. Let promiseCapability be ! NewPromiseCapability(%Promise%). + Rooted resultPromise( + cx, CreatePromiseObjectForAsyncGenerator(cx)); + if (!resultPromise) { + return false; + } + + if (!AsyncGeneratorMethodSanityCheck(cx, generator)) { + return false; + } + + // Step 5. Let state be generator.[[AsyncGeneratorState]]. + // Step 6. If state is suspendedStart, then + if (generator->isSuspendedStart()) { + // Step 6.a. Set generator.[[AsyncGeneratorState]] to completed. + // Step 6.b. Set state to completed. + generator->setCompleted(); + } + + // Step 7. If state is completed, then + if (generator->isCompleted()) { + // Step 7.a. Perform + // ! Call(promiseCapability.[[Reject]], undefined, « exception »). 
+ if (!RejectPromiseInternal(cx, resultPromise, completionValue)) { + return false; + } + } else { + // Step 8. Let completion be ThrowCompletion(exception). + // Step 9. Perform + // ! AsyncGeneratorEnqueue(generator, completion, + // promiseCapability). + if (!AsyncGeneratorEnqueue(cx, generator, CompletionKind::Throw, + completionValue, resultPromise)) { + return false; + } + + // Step 10. If state is suspendedYield, then + if (generator->isSuspendedYield()) { + // Step 10.a. Perform ! AsyncGeneratorResume(generator, completion). + if (!AsyncGeneratorResume(cx, generator, CompletionKind::Throw, + completionValue)) { + return false; + } + } else { + // Step 11. Else, + // Step 11.a. Assert: state is either executing or awaiting-return. + MOZ_ASSERT(generator->isExecuting() || generator->isAwaitingReturn() || + generator->isAwaitingYieldReturn()); + } + } + + // Step 7.b. Return promiseCapability.[[Promise]]. + // and + // Step 12. Return promiseCapability.[[Promise]]. + args.rval().setObject(*resultPromise); + + return maybeEnterRealm.maybeLeaveAndWrap(cx, args.rval()); +} + +// ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 +// +// AsyncGeneratorResume ( generator, completion ) +// https://tc39.es/ecma262/#sec-asyncgeneratorresume +[[nodiscard]] static bool AsyncGeneratorResume( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue argument) { + MOZ_ASSERT(!generator->isClosed(), + "closed generator when resuming async generator"); + MOZ_ASSERT(generator->isSuspended(), + "non-suspended generator when resuming async generator"); + + // Step 1. Assert: generator.[[AsyncGeneratorState]] is either + // suspendedStart or suspendedYield. + // + // NOTE: We're using suspend/resume also for await. and the state can be + // anything. + + // Steps 2-4 are handled in generator. + + // Step 5. Set generator.[[AsyncGeneratorState]] to executing. + generator->setExecuting(); + + // Step 6. 
Push genContext onto the execution context stack; genContext is + // now the running execution context. + // Step 7. Resume the suspended evaluation of genContext using completion as + // the result of the operation that suspended it. Let result be the + // completion record returned by the resumed computation. + Handle funName = completionKind == CompletionKind::Normal + ? cx->names().AsyncGeneratorNext + : completionKind == CompletionKind::Throw + ? cx->names().AsyncGeneratorThrow + : cx->names().AsyncGeneratorReturn; + FixedInvokeArgs<1> args(cx); + args[0].set(argument); + RootedValue thisOrRval(cx, ObjectValue(*generator)); + if (!CallSelfHostedFunction(cx, funName, thisOrRval, args, &thisOrRval)) { + // 25.5.3.2, steps 5.f, 5.g. + if (!generator->isClosed()) { + generator->setClosed(); + } + return AsyncGeneratorThrown(cx, generator); + } + + // 6.2.3.1, steps 2-9. + if (generator->isAfterAwait()) { + return AsyncGeneratorAwait(cx, generator, thisOrRval); + } + + // 25.5.3.7, steps 5-6, 9. + if (generator->isAfterYield()) { + return AsyncGeneratorYield(cx, generator, thisOrRval); + } + + // 25.5.3.2, steps 5.d-g. 
+ return AsyncGeneratorReturned(cx, generator, thisOrRval); +} + +static const JSFunctionSpec async_generator_methods[] = { + JS_FN("next", js::AsyncGeneratorNext, 1, 0), + JS_FN("throw", js::AsyncGeneratorThrow, 1, 0), + JS_FN("return", js::AsyncGeneratorReturn, 1, 0), JS_FS_END}; + +static JSObject* CreateAsyncGeneratorFunction(JSContext* cx, JSProtoKey key) { + RootedObject proto(cx, &cx->global()->getFunctionConstructor()); + Handle name = cx->names().AsyncGeneratorFunction; + + // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 + // + // The AsyncGeneratorFunction Constructor + // https://tc39.es/ecma262/#sec-asyncgeneratorfunction-constructor + return NewFunctionWithProto(cx, AsyncGeneratorConstructor, 1, + FunctionFlags::NATIVE_CTOR, nullptr, name, proto, + gc::AllocKind::FUNCTION, TenuredObject); +} + +static JSObject* CreateAsyncGeneratorFunctionPrototype(JSContext* cx, + JSProtoKey key) { + return NewTenuredObjectWithFunctionPrototype(cx, cx->global()); +} + +static bool AsyncGeneratorFunctionClassFinish(JSContext* cx, + HandleObject asyncGenFunction, + HandleObject asyncGenerator) { + Handle global = cx->global(); + + // Change the "constructor" property to non-writable before adding any other + // properties, so it's still the last property and can be modified without a + // dictionary-mode transition. 
+ MOZ_ASSERT(asyncGenerator->as().getLastProperty().key() == + NameToId(cx->names().constructor)); + MOZ_ASSERT(!asyncGenerator->as().inDictionaryMode()); + + RootedValue asyncGenFunctionVal(cx, ObjectValue(*asyncGenFunction)); + if (!DefineDataProperty(cx, asyncGenerator, cx->names().constructor, + asyncGenFunctionVal, JSPROP_READONLY)) { + return false; + } + MOZ_ASSERT(!asyncGenerator->as().inDictionaryMode()); + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + + // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 + // + // AsyncGenerator Objects + // https://tc39.es/ecma262/#sec-asyncgenerator-objects + RootedObject asyncGenProto(cx, GlobalObject::createBlankPrototypeInheriting( + cx, &PlainObject::class_, asyncIterProto)); + if (!asyncGenProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncGenProto, nullptr, + async_generator_methods) || + !DefineToStringTag(cx, asyncGenProto, cx->names().AsyncGenerator)) { + return false; + } + + // ES2022 draft rev 193211a3d889a61e74ef7da1475dfa356e029f29 + // + // Properties of the AsyncGeneratorFunction Prototype Object + // https://tc39.es/ecma262/#sec-properties-of-asyncgeneratorfunction-prototype + if (!LinkConstructorAndPrototype(cx, asyncGenerator, asyncGenProto, + JSPROP_READONLY, JSPROP_READONLY) || + !DefineToStringTag(cx, asyncGenerator, + cx->names().AsyncGeneratorFunction)) { + return false; + } + + global->setAsyncGeneratorPrototype(asyncGenProto); + + return true; +} + +static const ClassSpec AsyncGeneratorFunctionClassSpec = { + CreateAsyncGeneratorFunction, + CreateAsyncGeneratorFunctionPrototype, + nullptr, + nullptr, + nullptr, + nullptr, + AsyncGeneratorFunctionClassFinish, + ClassSpec::DontDefineConstructor}; + +const JSClass js::AsyncGeneratorFunctionClass = { + "AsyncGeneratorFunction", 0, JS_NULL_CLASS_OPS, + &AsyncGeneratorFunctionClassSpec}; + +[[nodiscard]] bool 
js::AsyncGeneratorPromiseReactionJob( + JSContext* cx, PromiseHandler handler, + Handle generator, HandleValue argument) { + // Await's handlers don't return a value, nor throw any exceptions. + // They fail only on OOM. + switch (handler) { + case PromiseHandler::AsyncGeneratorAwaitedFulfilled: + return AsyncGeneratorAwaitedFulfilled(cx, generator, argument); + + case PromiseHandler::AsyncGeneratorAwaitedRejected: + return AsyncGeneratorAwaitedRejected(cx, generator, argument); + + case PromiseHandler::AsyncGeneratorAwaitReturnFulfilled: + return AsyncGeneratorAwaitReturnFulfilled(cx, generator, argument); + + case PromiseHandler::AsyncGeneratorAwaitReturnRejected: + return AsyncGeneratorAwaitReturnRejected(cx, generator, argument); + + case PromiseHandler::AsyncGeneratorYieldReturnAwaitedFulfilled: + return AsyncGeneratorYieldReturnAwaitedFulfilled(cx, generator, argument); + + case PromiseHandler::AsyncGeneratorYieldReturnAwaitedRejected: + return AsyncGeneratorYieldReturnAwaitedRejected(cx, generator, argument); + + default: + MOZ_CRASH("Bad handler in AsyncGeneratorPromiseReactionJob"); + } +} + +// --------------------- +// AsyncFromSyncIterator +// --------------------- + +const JSClass AsyncFromSyncIteratorObject::class_ = { + "AsyncFromSyncIteratorObject", + JSCLASS_HAS_RESERVED_SLOTS(AsyncFromSyncIteratorObject::Slots)}; + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.1 CreateAsyncFromSyncIterator +JSObject* js::CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter, + HandleValue nextMethod) { + // Steps 1-3. + return AsyncFromSyncIteratorObject::create(cx, iter, nextMethod); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.1 CreateAsyncFromSyncIterator +/* static */ +JSObject* AsyncFromSyncIteratorObject::create(JSContext* cx, HandleObject iter, + HandleValue nextMethod) { + // Step 1. 
+ RootedObject proto(cx, + GlobalObject::getOrCreateAsyncFromSyncIteratorPrototype( + cx, cx->global())); + if (!proto) { + return nullptr; + } + + AsyncFromSyncIteratorObject* asyncIter = + NewObjectWithGivenProto(cx, proto); + if (!asyncIter) { + return nullptr; + } + + // Step 2. + asyncIter->init(iter, nextMethod); + + // Step 3 (Call to 7.4.1 GetIterator). + // 7.4.1 GetIterator, steps 1-5 are a no-op (*). + // 7.4.1 GetIterator, steps 6-8 are implemented in bytecode. + // + // (*) With fixed. + return asyncIter; +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.1 %AsyncFromSyncIteratorPrototype%.next +static bool AsyncFromSyncIteratorNext(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Normal); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.2 %AsyncFromSyncIteratorPrototype%.return +static bool AsyncFromSyncIteratorReturn(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Return); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.3 %AsyncFromSyncIteratorPrototype%.throw +static bool AsyncFromSyncIteratorThrow(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Throw); +} + +static const JSFunctionSpec async_from_sync_iter_methods[] = { + JS_FN("next", AsyncFromSyncIteratorNext, 1, 0), + JS_FN("throw", AsyncFromSyncIteratorThrow, 1, 0), + JS_FN("return", AsyncFromSyncIteratorReturn, 1, 0), JS_FS_END}; + +bool GlobalObject::initAsyncFromSyncIteratorProto( + JSContext* cx, Handle global) { + if (global->hasBuiltinProto(ProtoKind::AsyncFromSyncIteratorProto)) { + return true; + } + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, 
global)); + if (!asyncIterProto) { + return false; + } + + // 25.1.4.2 The %AsyncFromSyncIteratorPrototype% Object + RootedObject asyncFromSyncIterProto( + cx, GlobalObject::createBlankPrototypeInheriting(cx, &PlainObject::class_, + asyncIterProto)); + if (!asyncFromSyncIterProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncFromSyncIterProto, nullptr, + async_from_sync_iter_methods) || + !DefineToStringTag(cx, asyncFromSyncIterProto, + cx->names().AsyncFromSyncIterator)) { + return false; + } + + global->initBuiltinProto(ProtoKind::AsyncFromSyncIteratorProto, + asyncFromSyncIterProto); + return true; +} + +// ------------- +// AsyncIterator +// ------------- + +static const JSFunctionSpec async_iterator_proto_methods[] = { + JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0), + JS_FS_END}; + +static const JSFunctionSpec async_iterator_proto_methods_with_helpers[] = { + JS_SELF_HOSTED_FN("map", "AsyncIteratorMap", 1, 0), + JS_SELF_HOSTED_FN("filter", "AsyncIteratorFilter", 1, 0), + JS_SELF_HOSTED_FN("take", "AsyncIteratorTake", 1, 0), + JS_SELF_HOSTED_FN("drop", "AsyncIteratorDrop", 1, 0), + JS_SELF_HOSTED_FN("asIndexedPairs", "AsyncIteratorAsIndexedPairs", 0, 0), + JS_SELF_HOSTED_FN("flatMap", "AsyncIteratorFlatMap", 1, 0), + JS_SELF_HOSTED_FN("reduce", "AsyncIteratorReduce", 1, 0), + JS_SELF_HOSTED_FN("toArray", "AsyncIteratorToArray", 0, 0), + JS_SELF_HOSTED_FN("forEach", "AsyncIteratorForEach", 1, 0), + JS_SELF_HOSTED_FN("some", "AsyncIteratorSome", 1, 0), + JS_SELF_HOSTED_FN("every", "AsyncIteratorEvery", 1, 0), + JS_SELF_HOSTED_FN("find", "AsyncIteratorFind", 1, 0), + JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0), + JS_FS_END}; + +bool GlobalObject::initAsyncIteratorProto(JSContext* cx, + Handle global) { + if (global->hasBuiltinProto(ProtoKind::AsyncIteratorProto)) { + return true; + } + + // 25.1.3 The %AsyncIteratorPrototype% Object + RootedObject asyncIterProto( + cx, 
GlobalObject::createBlankPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncIterProto, nullptr, + async_iterator_proto_methods)) { + return false; + } + + global->initBuiltinProto(ProtoKind::AsyncIteratorProto, asyncIterProto); + return true; +} + +// https://tc39.es/proposal-iterator-helpers/#sec-asynciterator as of revision +// 8f10db5. +static bool AsyncIteratorConstructor(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 1. + if (!ThrowIfNotConstructing(cx, args, js_AsyncIterator_str)) { + return false; + } + // Throw TypeError if NewTarget is the active function object, preventing the + // Iterator constructor from being used directly. + if (args.callee() == args.newTarget().toObject()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BOGUS_CONSTRUCTOR, js_AsyncIterator_str); + return false; + } + + // Step 2. + RootedObject proto(cx); + if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_AsyncIterator, + &proto)) { + return false; + } + + JSObject* obj = NewObjectWithClassProto(cx, proto); + if (!obj) { + return false; + } + + args.rval().setObject(*obj); + return true; +} + +static const ClassSpec AsyncIteratorObjectClassSpec = { + GenericCreateConstructor, + GenericCreatePrototype, + nullptr, + nullptr, + async_iterator_proto_methods_with_helpers, + nullptr, + nullptr, +}; + +const JSClass AsyncIteratorObject::class_ = { + js_AsyncIterator_str, + JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator), + JS_NULL_CLASS_OPS, + &AsyncIteratorObjectClassSpec, +}; + +const JSClass AsyncIteratorObject::protoClass_ = { + "AsyncIterator.prototype", + JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator), + JS_NULL_CLASS_OPS, + &AsyncIteratorObjectClassSpec, +}; + +// Iterator Helper proposal +static const JSFunctionSpec async_iterator_helper_methods[] = { + JS_SELF_HOSTED_FN("next", "AsyncIteratorHelperNext", 1, 0), + 
JS_SELF_HOSTED_FN("return", "AsyncIteratorHelperReturn", 1, 0), + JS_SELF_HOSTED_FN("throw", "AsyncIteratorHelperThrow", 1, 0), + JS_FS_END, +}; + +static const JSClass AsyncIteratorHelperPrototypeClass = { + "Async Iterator Helper", 0}; + +const JSClass AsyncIteratorHelperObject::class_ = { + "Async Iterator Helper", + JSCLASS_HAS_RESERVED_SLOTS(AsyncIteratorHelperObject::SlotCount), +}; + +/* static */ +NativeObject* GlobalObject::getOrCreateAsyncIteratorHelperPrototype( + JSContext* cx, Handle global) { + return MaybeNativeObject( + getOrCreateBuiltinProto(cx, global, ProtoKind::AsyncIteratorHelperProto, + initAsyncIteratorHelperProto)); +} + +/* static */ +bool GlobalObject::initAsyncIteratorHelperProto(JSContext* cx, + Handle global) { + if (global->hasBuiltinProto(ProtoKind::AsyncIteratorHelperProto)) { + return true; + } + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + + RootedObject asyncIteratorHelperProto( + cx, GlobalObject::createBlankPrototypeInheriting( + cx, &AsyncIteratorHelperPrototypeClass, asyncIterProto)); + if (!asyncIteratorHelperProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncIteratorHelperProto, nullptr, + async_iterator_helper_methods)) { + return false; + } + + global->initBuiltinProto(ProtoKind::AsyncIteratorHelperProto, + asyncIteratorHelperProto); + return true; +} + +AsyncIteratorHelperObject* js::NewAsyncIteratorHelper(JSContext* cx) { + RootedObject proto(cx, GlobalObject::getOrCreateAsyncIteratorHelperPrototype( + cx, cx->global())); + if (!proto) { + return nullptr; + } + return NewObjectWithGivenProto(cx, proto); +} diff --git a/js/src/vm/AsyncIteration.h b/js/src/vm/AsyncIteration.h new file mode 100644 index 0000000000..4629329cc8 --- /dev/null +++ b/js/src/vm/AsyncIteration.h @@ -0,0 +1,571 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 
tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_AsyncIteration_h +#define vm_AsyncIteration_h + +#include "builtin/Promise.h" // js::PromiseHandler +#include "builtin/SelfHostingDefines.h" +#include "js/Class.h" +#include "vm/GeneratorObject.h" +#include "vm/JSObject.h" +#include "vm/List.h" +#include "vm/PromiseObject.h" + +// [SMDOC] Async generators +// +// # Start +// +// When an async generator is called, it synchronously runs until the +// JSOp::InitialYield and then suspends, just like a sync generator, and returns +// an async generator object (js::AsyncGeneratorObject). +// +// +// # Request queue +// +// When next/return/throw is called on the async generator object, +// js::AsyncGeneratorEnqueue performs the following: +// * Create a new AsyncGeneratorRequest and enqueue it in the generator +// object's request queue. +// * Resume the generator with the oldest request, if the generator is +// suspended (see "Resume" section below) +// * Return the promise for the request +// +// This is done in js::AsyncGeneratorEnqueue, which corresponds to +// AsyncGeneratorEnqueue in the spec, +// and js::AsyncGeneratorResumeNext corresponds to the following: +// * AsyncGeneratorResolve +// * AsyncGeneratorReject +// * AsyncGeneratorResumeNext +// +// The returned promise is resolved when the resumption for the request +// completes with yield/throw/return, in js::AsyncGeneratorResolve and +// js::AsyncGeneratorReject. +// They correspond to AsyncGeneratorResolve and AsyncGeneratorReject in the +// spec. +// +// +// # Await +// +// Async generator's `await` is implemented differently than async function's +// `await`. 
+// +// The bytecode is the following: +// (ignoring CanSkipAwait; see the comment in AsyncFunction.h for more details) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// ``` +// +// Async generators don't use JSOp::AsyncAwait, and that part is handled +// in js::AsyncGeneratorResume, and js::AsyncGeneratorAwait called there. +// +// Both JSOp::Await and JSOp::Yield behave in the exactly same way, +// and js::AsyncGeneratorResume checks the last opcode and branches for +// await/yield/return cases. +// +// +// # Reaction jobs and resume after await +// +// This is almost same as for async functions (see AsyncFunction.h). +// +// The reaction record for the job is marked as "this is for async generator" +// (see js::AsyncGeneratorAwait), and handled specially in +// js::PromiseReactionJob, which calls js::AsyncGeneratorPromiseReactionJob. +// +// +// # Yield +// +// `yield` is implemented with the following bytecode sequence: +// (Ignoring CanSkipAwait for simplicity) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 1 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// GetAliasedVar ".generator" # RVAL .generator +// Yield 2 # RVAL2 GENERATOR RESUMEKIND +// +// AfterYield # RVAL2 GENERATOR RESUMEKIND +// CheckResumeKind # RVAL2 +// ``` +// +// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs an implicit +// `await`, as specified in AsyncGeneratorYield step 5. +// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 5. Set value to ? Await(value). +// +// The 2nd part (JSOp::Yield) suspends execution and yields the result of +// `await`, as specified in AsyncGeneratorYield steps 1-4, 6-7, 9-10. 
+// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 1. Let genContext be the running execution context. +// 2. Assert: genContext is the execution context of a generator. +// 3. Let generator be the value of the Generator component of genContext. +// 4. Assert: GetGeneratorKind() is async. +// .. +// 6. Set generator.[[AsyncGeneratorState]] to suspendedYield. +// 7. Remove genContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// 8. ... +// 9. Return ! AsyncGeneratorResolve(generator, value, false). +// 10. NOTE: This returns to the evaluation of the operation that had most +// previously resumed evaluation of genContext. +// +// The last part (JSOp::CheckResumeKind) checks the resumption type and +// resumes/throws/returns the execution, as specified in AsyncGeneratorYield +// step 8. +// +// 8. Set the code evaluation state of genContext such that when evaluation is +// resumed with a Completion resumptionValue the following steps will be +// performed: +// a. If resumptionValue.[[Type]] is not return, return +// Completion(resumptionValue). +// b. Let awaited be Await(resumptionValue.[[Value]]). +// c. If awaited.[[Type]] is throw, return Completion(awaited). +// d. Assert: awaited.[[Type]] is normal. +// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]], +// [[Target]]: empty }. +// f. NOTE: When one of the above steps returns, it returns to the +// evaluation of the YieldExpression production that originally called +// this abstract operation. +// +// Resumption with `AsyncGenerator.prototype.return` is handled differently. +// See "Resumption with return" section below. 
+// +// +// # Return +// +// `return` with operand is implemented with the following bytecode sequence: +// (Ignoring CanSkipAwait for simplicity) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs implicit +// `await`, as specified in ReturnStatement's Evaluation step 3. +// +// ReturnStatement: return Expression; +// https://tc39.es/ecma262/#sec-return-statement-runtime-semantics-evaluation +// +// 3. If ! GetGeneratorKind() is async, set exprValue to ? Await(exprValue). +// +// And the 2nd part corresponds to AsyncGeneratorStart steps 5.a-e, 5.g. +// +// AsyncGeneratorStart ( generator, generatorBody ) +// https://tc39.es/ecma262/#sec-asyncgeneratorstart +// +// 5. Set the code evaluation state of genContext such that when evaluation +// is resumed for that execution context the following steps will be +// performed: +// a. Let result be the result of evaluating generatorBody. +// b. Assert: If we return here, the async generator either threw an +// exception or performed either an implicit or explicit return. +// c. Remove genContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack +// as the running execution context. +// d. Set generator.[[AsyncGeneratorState]] to completed. +// e. If result is a normal completion, let resultValue be undefined. +// ... +// g. Return ! AsyncGeneratorResolve(generator, resultValue, true). 
+// +// `return` without operand or implicit return is implicit with the following +// bytecode sequence: +// +// ``` +// Undefined # undefined +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// This is also AsyncGeneratorStart steps 5.a-e, 5.g. +// +// +// # Throw +// +// Unlike async function, async generator doesn't use implicit try-catch, +// but the throw completion is handled by js::AsyncGeneratorResume, +// and js::AsyncGeneratorThrown is called there. +// +// 5. ... +// f. Else, +// i. Let resultValue be result.[[Value]]. +// ii. If result.[[Type]] is not return, then +// 1. Return ! AsyncGeneratorReject(generator, resultValue). +// +// +// # Resumption with return +// +// Resumption with return completion is handled in js::AsyncGeneratorResumeNext. +// +// If the generator is suspended, it doesn't immediately resume the generator +// script itself, but handles implicit `await` it in +// js::AsyncGeneratorResumeNext. +// (See PromiseHandlerAsyncGeneratorYieldReturnAwaitedFulfilled and +// PromiseHandlerAsyncGeneratorYieldReturnAwaitedRejected), and resumes the +// generator with the result of await. +// And the return completion is finally handled in JSOp::CheckResumeKind +// after JSOp::Yield. +// +// This corresponds to AsyncGeneratorYield step 8. +// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 8. Set the code evaluation state of genContext such that when evaluation +// is resumed with a Completion resumptionValue the following steps will +// be performed: +// .. +// b. Let awaited be Await(resumptionValue.[[Value]]). +// c. If awaited.[[Type]] is throw, return Completion(awaited). +// d. Assert: awaited.[[Type]] is normal. +// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]], +// [[Target]]: empty }. 
+// +// If the generator is already completed, it awaits on the return value, +// (See PromiseHandlerAsyncGeneratorResumeNextReturnFulfilled and +// PromiseHandlerAsyncGeneratorResumeNextReturnRejected), and resolves the +// request's promise with the value. +// +// It corresponds to AsyncGeneratorResumeNext step 10.b.i. +// +// AsyncGeneratorResumeNext ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratorresumenext +// +// 10. If completion is an abrupt completion, then +// .. +// b. If state is completed, then +// i. If completion.[[Type]] is return, then +// 1. Set generator.[[AsyncGeneratorState]] to awaiting-return. +// 2. Let promise be ? PromiseResolve(%Promise%, completion.[[Value]]). +// 3. Let stepsFulfilled be the algorithm steps defined in +// AsyncGeneratorResumeNext Return Processor Fulfilled Functions. +// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, « +// [[Generator]] »). +// 5. Set onFulfilled.[[Generator]] to generator. +// 6. Let stepsRejected be the algorithm steps defined in +// AsyncGeneratorResumeNext Return Processor Rejected Functions. +// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, « +// [[Generator]] »). +// 8. Set onRejected.[[Generator]] to generator. +// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected). +// 10. Return undefined. +// + +namespace js { + +class AsyncGeneratorObject; +enum class CompletionKind : uint8_t; + +extern const JSClass AsyncGeneratorFunctionClass; + +[[nodiscard]] bool AsyncGeneratorPromiseReactionJob( + JSContext* cx, PromiseHandler handler, + Handle generator, HandleValue argument); + +bool AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp); +bool AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp); +bool AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp); + +// AsyncGeneratorRequest record in the spec. +// Stores the info from AsyncGenerator#{next,return,throw}. 
+// +// This object is reused across multiple requests as an optimization, and +// stored in the Slot_CachedRequest slot. +class AsyncGeneratorRequest : public NativeObject { + private: + enum AsyncGeneratorRequestSlots { + // Int32 value with CompletionKind. + // Normal: next + // Return: return + // Throw: throw + Slot_CompletionKind = 0, + + // The value passed to AsyncGenerator#{next,return,throw}. + Slot_CompletionValue, + + // The promise returned by AsyncGenerator#{next,return,throw}. + Slot_Promise, + + Slots, + }; + + void init(CompletionKind completionKind, const Value& completionValue, + PromiseObject* promise) { + setFixedSlot(Slot_CompletionKind, + Int32Value(static_cast(completionKind))); + setFixedSlot(Slot_CompletionValue, completionValue); + setFixedSlot(Slot_Promise, ObjectValue(*promise)); + } + + // Clear the request data for reuse. + void clearData() { + setFixedSlot(Slot_CompletionValue, NullValue()); + setFixedSlot(Slot_Promise, NullValue()); + } + + friend AsyncGeneratorObject; + + public: + static const JSClass class_; + + static AsyncGeneratorRequest* create(JSContext* cx, + CompletionKind completionKind, + HandleValue completionValue, + Handle promise); + + CompletionKind completionKind() const { + return static_cast( + getFixedSlot(Slot_CompletionKind).toInt32()); + } + JS::Value completionValue() const { + return getFixedSlot(Slot_CompletionValue); + } + PromiseObject* promise() const { + return &getFixedSlot(Slot_Promise).toObject().as(); + } +}; + +class AsyncGeneratorObject : public AbstractGeneratorObject { + private: + enum AsyncGeneratorObjectSlots { + // Int32 value containing one of the |State| fields from below. 
+ Slot_State = AbstractGeneratorObject::RESERVED_SLOTS, + + // * null value if this async generator has no requests + // * AsyncGeneratorRequest if this async generator has only one request + // * list object if this async generator has 2 or more requests + Slot_QueueOrRequest, + + // Cached AsyncGeneratorRequest for later use. + // undefined if there's no cache. + Slot_CachedRequest, + + Slots + }; + + public: + enum State { + // "suspendedStart" in the spec. + // Suspended after invocation. + State_SuspendedStart, + + // "suspendedYield" in the spec + // Suspended with `yield` expression. + State_SuspendedYield, + + // "executing" in the spec. + // Resumed from initial suspend or yield, and either running the script + // or awaiting for `await` expression. + State_Executing, + + // Part of "executing" in the spec. + // Awaiting on the value passed by AsyncGenerator#return which is called + // while executing. + State_AwaitingYieldReturn, + + // "awaiting-return" in the spec. + // Awaiting on the value passed by AsyncGenerator#return which is called + // after completed. + State_AwaitingReturn, + + // "completed" in the spec. + // The generator is completed. + State_Completed + }; + + State state() const { + return static_cast(getFixedSlot(Slot_State).toInt32()); + } + void setState(State state_) { setFixedSlot(Slot_State, Int32Value(state_)); } + + private: + // Queue is implemented in 2 ways. If only one request is queued ever, + // request is stored directly to the slot. Once 2 requests are queued, a + // list is created and requests are appended into it, and the list is + // stored to the slot. 
+ + bool isSingleQueue() const { + return getFixedSlot(Slot_QueueOrRequest).isNull() || + getFixedSlot(Slot_QueueOrRequest) + .toObject() + .is(); + } + bool isSingleQueueEmpty() const { + return getFixedSlot(Slot_QueueOrRequest).isNull(); + } + void setSingleQueueRequest(AsyncGeneratorRequest* request) { + setFixedSlot(Slot_QueueOrRequest, ObjectValue(*request)); + } + void clearSingleQueueRequest() { + setFixedSlot(Slot_QueueOrRequest, NullValue()); + } + AsyncGeneratorRequest* singleQueueRequest() const { + return &getFixedSlot(Slot_QueueOrRequest) + .toObject() + .as(); + } + + ListObject* queue() const { + return &getFixedSlot(Slot_QueueOrRequest).toObject().as(); + } + void setQueue(ListObject* queue_) { + setFixedSlot(Slot_QueueOrRequest, ObjectValue(*queue_)); + } + + public: + static const JSClass class_; + static const JSClassOps classOps_; + + static AsyncGeneratorObject* create(JSContext* cx, HandleFunction asyncGen); + + bool isSuspendedStart() const { return state() == State_SuspendedStart; } + bool isSuspendedYield() const { return state() == State_SuspendedYield; } + bool isExecuting() const { return state() == State_Executing; } + bool isAwaitingYieldReturn() const { + return state() == State_AwaitingYieldReturn; + } + bool isAwaitingReturn() const { return state() == State_AwaitingReturn; } + bool isCompleted() const { return state() == State_Completed; } + + void setSuspendedStart() { setState(State_SuspendedStart); } + void setSuspendedYield() { setState(State_SuspendedYield); } + void setExecuting() { setState(State_Executing); } + void setAwaitingYieldReturn() { setState(State_AwaitingYieldReturn); } + void setAwaitingReturn() { setState(State_AwaitingReturn); } + void setCompleted() { setState(State_Completed); } + + [[nodiscard]] static bool enqueueRequest( + JSContext* cx, Handle generator, + Handle request); + + static AsyncGeneratorRequest* dequeueRequest( + JSContext* cx, Handle generator); + + static AsyncGeneratorRequest* peekRequest( 
+ Handle generator); + + bool isQueueEmpty() const { + if (isSingleQueue()) { + return isSingleQueueEmpty(); + } + return queue()->getDenseInitializedLength() == 0; + } + + // This function does either of the following: + // * return a cached request object with the slots updated + // * create a new request object with the slots set + static AsyncGeneratorRequest* createRequest( + JSContext* cx, Handle generator, + CompletionKind completionKind, HandleValue completionValue, + Handle promise); + + // Stores the given request to the generator's cache after clearing its data + // slots. The cached request will be reused in the subsequent createRequest + // call. + void cacheRequest(AsyncGeneratorRequest* request) { + if (hasCachedRequest()) { + return; + } + + request->clearData(); + setFixedSlot(Slot_CachedRequest, ObjectValue(*request)); + } + + private: + bool hasCachedRequest() const { + return getFixedSlot(Slot_CachedRequest).isObject(); + } + + AsyncGeneratorRequest* takeCachedRequest() { + auto request = &getFixedSlot(Slot_CachedRequest) + .toObject() + .as(); + clearCachedRequest(); + return request; + } + + void clearCachedRequest() { setFixedSlot(Slot_CachedRequest, NullValue()); } +}; + +JSObject* CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter, + HandleValue nextMethod); + +class AsyncFromSyncIteratorObject : public NativeObject { + private: + enum AsyncFromSyncIteratorObjectSlots { + // Object that implements the sync iterator protocol. + Slot_Iterator = 0, + + // The `next` property of the iterator object. 
+ Slot_NextMethod = 1, + + Slots + }; + + void init(JSObject* iterator, const Value& nextMethod) { + setFixedSlot(Slot_Iterator, ObjectValue(*iterator)); + setFixedSlot(Slot_NextMethod, nextMethod); + } + + public: + static const JSClass class_; + + static JSObject* create(JSContext* cx, HandleObject iter, + HandleValue nextMethod); + + JSObject* iterator() const { return &getFixedSlot(Slot_Iterator).toObject(); } + + const Value& nextMethod() const { return getFixedSlot(Slot_NextMethod); } +}; + +class AsyncIteratorObject : public NativeObject { + public: + static const JSClass class_; + static const JSClass protoClass_; +}; + +// Iterator Helpers proposal +class AsyncIteratorHelperObject : public NativeObject { + public: + static const JSClass class_; + + enum { GeneratorSlot, SlotCount }; + + static_assert(GeneratorSlot == ASYNC_ITERATOR_HELPER_GENERATOR_SLOT, + "GeneratorSlot must match self-hosting define for generator " + "object slot."); +}; + +AsyncIteratorHelperObject* NewAsyncIteratorHelper(JSContext* cx); + +} // namespace js + +#endif /* vm_AsyncIteration_h */ diff --git a/js/src/vm/AtomsTable.h b/js/src/vm/AtomsTable.h new file mode 100644 index 0000000000..aae7728fe5 --- /dev/null +++ b/js/src/vm/AtomsTable.h @@ -0,0 +1,123 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Implementation details of the atoms table. + */ + +#ifndef vm_AtomsTable_h +#define vm_AtomsTable_h + +#include "gc/Barrier.h" +#include "js/GCHashTable.h" +#include "js/TypeDecls.h" +#include "js/Vector.h" +#include "vm/StringType.h" + +/* + * The atoms table is a mapping from strings to JSAtoms that supports + * incremental sweeping. 
+ */ + +namespace js { + +struct AtomHasher { + struct Lookup; + static inline HashNumber hash(const Lookup& l); + static MOZ_ALWAYS_INLINE bool match(const WeakHeapPtr& entry, + const Lookup& lookup); + static void rekey(WeakHeapPtr& k, + const WeakHeapPtr& newKey) { + k = newKey; + } +}; + +// Note: Use a 'class' here to make forward declarations easier to use. +class AtomSet : public JS::GCHashSet, AtomHasher, + SystemAllocPolicy> { + using Base = + JS::GCHashSet, AtomHasher, SystemAllocPolicy>; + + public: + AtomSet() = default; + explicit AtomSet(size_t length) : Base(length){}; +}; + +// This class is a wrapper for AtomSet that is used to ensure the AtomSet is +// not modified. It should only expose read-only methods from AtomSet. +// Note however that the atoms within the table can be marked during GC. +class FrozenAtomSet { + AtomSet* mSet; + + public: + // This constructor takes ownership of the passed-in AtomSet. + explicit FrozenAtomSet(AtomSet* set) { mSet = set; } + + ~FrozenAtomSet() { js_delete(mSet); } + + MOZ_ALWAYS_INLINE AtomSet::Ptr readonlyThreadsafeLookup( + const AtomSet::Lookup& l) const; + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return mSet->shallowSizeOfIncludingThis(mallocSizeOf); + } + + using Range = AtomSet::Range; + + AtomSet::Range all() const { return mSet->all(); } +}; + +class AtomsTable { + // Use a low initial capacity for atom hash tables to avoid penalizing + // runtimes which create a small number of atoms. + static const size_t InitialTableSize = 16; + + // The main atoms set. + AtomSet atoms; + + // Set of atoms added while the |atoms| set is being swept. + AtomSet* atomsAddedWhileSweeping; + + // List of pinned atoms that are traced in every GC. + Vector pinnedAtoms; + + public: + // An iterator used for sweeping atoms incrementally. 
+ using SweepIterator = AtomSet::Enum; + + AtomsTable(); + ~AtomsTable(); + bool init(); + + template + MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyCharsNonStaticValidLength( + JSContext* cx, const CharT* chars, size_t length, + const mozilla::Maybe& indexValue, + const AtomHasher::Lookup& lookup); + + bool maybePinExistingAtom(JSContext* cx, JSAtom* atom); + + void tracePinnedAtoms(JSTracer* trc); + + // Sweep all atoms non-incrementally. + void traceWeak(JSTracer* trc); + + bool startIncrementalSweep(mozilla::Maybe& atomsToSweepOut); + + // Sweep some atoms incrementally and return whether we finished. + bool sweepIncrementally(SweepIterator& atomsToSweep, SliceBudget& budget); + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const; + + private: + void mergeAtomsAddedWhileSweeping(); +}; + +bool AtomIsPinned(JSContext* cx, JSAtom* atom); + +} // namespace js + +#endif /* vm_AtomsTable_h */ diff --git a/js/src/vm/BigIntType.cpp b/js/src/vm/BigIntType.cpp new file mode 100644 index 0000000000..0f9621da54 --- /dev/null +++ b/js/src/vm/BigIntType.cpp @@ -0,0 +1,3847 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Portions of this code taken from WebKit, whose copyright is as follows: + * + * Copyright (C) 2017 Caio Lima + * Copyright (C) 2017-2018 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Portions of this code taken from V8, whose copyright notice is as follows: + * + * Copyright 2017 the V8 project authors. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. 
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Portions of this code taken from Dart, whose copyright notice is as follows: + * + * Copyright (c) 2014 the Dart project authors. Please see the AUTHORS file + * [1] for details. All rights reserved. Use of this source code is governed by + * a BSD-style license that can be found in the LICENSE file [2]. + * + * [1] https://github.com/dart-lang/sdk/blob/master/AUTHORS + * [2] https://github.com/dart-lang/sdk/blob/master/LICENSE + * + * Portions of this code taken from Go, whose copyright notice is as follows: + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file [3]. 
+ * + * [3] https://golang.org/LICENSE + */ + +#include "vm/BigIntType.h" + +#include "mozilla/Casting.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/HashFunctions.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Maybe.h" +#include "mozilla/MemoryChecking.h" +#include "mozilla/Range.h" +#include "mozilla/RangedPtr.h" +#include "mozilla/Span.h" // mozilla::Span +#include "mozilla/WrappingOperations.h" + +#include +#include +#include +#include // std::is_same_v + +#include "jsnum.h" + +#include "gc/Allocator.h" +#include "js/BigInt.h" +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/StableStringChars.h" +#include "js/Utility.h" +#include "util/CheckedArithmetic.h" +#include "util/DifferentialTesting.h" +#include "vm/JSContext.h" +#include "vm/StaticStrings.h" + +#include "gc/GCContext-inl.h" +#include "gc/Nursery-inl.h" +#include "vm/JSContext-inl.h" + +using namespace js; + +using JS::AutoStableStringChars; +using mozilla::Abs; +using mozilla::AssertedCast; +using mozilla::BitwiseCast; +using mozilla::Maybe; +using mozilla::NegativeInfinity; +using mozilla::Nothing; +using mozilla::PositiveInfinity; +using mozilla::Range; +using mozilla::RangedPtr; +using mozilla::Some; +using mozilla::WrapToSigned; + +static inline unsigned DigitLeadingZeroes(BigInt::Digit x) { + return sizeof(x) == 4 ? mozilla::CountLeadingZeroes32(x) + : mozilla::CountLeadingZeroes64(x); +} + +#ifdef DEBUG +static bool HasLeadingZeroes(BigInt* bi) { + return bi->digitLength() > 0 && bi->digit(bi->digitLength() - 1) == 0; +} +#endif + +BigInt* BigInt::createUninitialized(JSContext* cx, size_t digitLength, + bool isNegative, gc::Heap heap) { + if (digitLength > MaxDigitLength) { + ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + + BigInt* x = cx->newCell(heap); + if (!x) { + return nullptr; + } + + x->setLengthAndFlags(digitLength, isNegative ? 
SignBit : 0); + + MOZ_ASSERT(x->digitLength() == digitLength); + MOZ_ASSERT(x->isNegative() == isNegative); + + if (digitLength > InlineDigitsLength) { + x->heapDigits_ = js::AllocateBigIntDigits(cx, x, digitLength); + if (!x->heapDigits_) { + // |x| is partially initialized, expose it as a BigInt using inline digits + // to the GC. + x->setLengthAndFlags(0, 0); + return nullptr; + } + + AddCellMemory(x, digitLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + } + + return x; +} + +void BigInt::initializeDigitsToZero() { + auto digs = digits(); + std::uninitialized_fill_n(digs.begin(), digs.Length(), 0); +} + +void BigInt::finalize(JS::GCContext* gcx) { + MOZ_ASSERT(isTenured()); + if (hasHeapDigits()) { + size_t size = digitLength() * sizeof(Digit); + gcx->free_(this, heapDigits_, size, js::MemoryUse::BigIntDigits); + } +} + +js::HashNumber BigInt::hash() const { + js::HashNumber h = + mozilla::HashBytes(digits().data(), digitLength() * sizeof(Digit)); + return mozilla::AddToHash(h, isNegative()); +} + +size_t BigInt::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return hasInlineDigits() ? 0 : mallocSizeOf(heapDigits_); +} + +size_t BigInt::sizeOfExcludingThisInNursery( + mozilla::MallocSizeOf mallocSizeOf) const { + MOZ_ASSERT(!isTenured()); + + if (hasInlineDigits()) { + return 0; + } + + const Nursery& nursery = runtimeFromMainThread()->gc.nursery(); + if (nursery.isInside(heapDigits_)) { + // See |AllocateBigIntDigits()|. 
+ return RoundUp(digitLength() * sizeof(Digit), sizeof(Value)); + } + + return mallocSizeOf(heapDigits_); +} + +BigInt* BigInt::zero(JSContext* cx, gc::Heap heap) { + return createUninitialized(cx, 0, false, heap); +} + +BigInt* BigInt::createFromDigit(JSContext* cx, Digit d, bool isNegative) { + MOZ_ASSERT(d != 0); + BigInt* res = createUninitialized(cx, 1, isNegative); + if (!res) { + return nullptr; + } + res->setDigit(0, d); + return res; +} + +BigInt* BigInt::one(JSContext* cx) { return createFromDigit(cx, 1, false); } + +BigInt* BigInt::negativeOne(JSContext* cx) { + return createFromDigit(cx, 1, true); +} + +BigInt* BigInt::createFromNonZeroRawUint64(JSContext* cx, uint64_t n, + bool isNegative) { + MOZ_ASSERT(n != 0); + + size_t resultLength = 1; + if (DigitBits == 32 && (n >> 32) != 0) { + resultLength = 2; + } + + BigInt* result = createUninitialized(cx, resultLength, isNegative); + if (!result) { + return nullptr; + } + result->setDigit(0, n); + if (DigitBits == 32 && resultLength > 1) { + result->setDigit(1, n >> 32); + } + + MOZ_ASSERT(!HasLeadingZeroes(result)); + return result; +} + +BigInt* BigInt::neg(JSContext* cx, HandleBigInt x) { + if (x->isZero()) { + return x; + } + + BigInt* result = copy(cx, x); + if (!result) { + return nullptr; + } + result->toggleHeaderFlagBit(SignBit); + return result; +} + +#if !defined(JS_64BIT) +# define HAVE_TWO_DIGIT 1 +using TwoDigit = uint64_t; +#elif defined(__SIZEOF_INT128__) +# define HAVE_TWO_DIGIT 1 +using TwoDigit = __uint128_t; +#endif + +inline BigInt::Digit BigInt::digitMul(Digit a, Digit b, Digit* high) { +#if defined(HAVE_TWO_DIGIT) + TwoDigit result = static_cast(a) * static_cast(b); + *high = result >> DigitBits; + + return static_cast(result); +#else + // Multiply in half-pointer-sized chunks. 
+ // For inputs [AH AL]*[BH BL], the result is: + // + // [AL*BL] // rLow + // + [AL*BH] // rMid1 + // + [AH*BL] // rMid2 + // + [AH*BH] // rHigh + // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1] + // + // Where of course we must be careful with carries between the columns. + Digit aLow = a & HalfDigitMask; + Digit aHigh = a >> HalfDigitBits; + Digit bLow = b & HalfDigitMask; + Digit bHigh = b >> HalfDigitBits; + + Digit rLow = aLow * bLow; + Digit rMid1 = aLow * bHigh; + Digit rMid2 = aHigh * bLow; + Digit rHigh = aHigh * bHigh; + + Digit carry = 0; + Digit low = digitAdd(rLow, rMid1 << HalfDigitBits, &carry); + low = digitAdd(low, rMid2 << HalfDigitBits, &carry); + + *high = (rMid1 >> HalfDigitBits) + (rMid2 >> HalfDigitBits) + rHigh + carry; + + return low; +#endif +} + +BigInt::Digit BigInt::digitDiv(Digit high, Digit low, Digit divisor, + Digit* remainder) { + MOZ_ASSERT(high < divisor, "division must not overflow"); +#if defined(__x86_64__) + Digit quotient; + Digit rem; + __asm__("divq %[divisor]" + // Outputs: `quotient` will be in rax, `rem` in rdx. + : "=a"(quotient), "=d"(rem) + // Inputs: put `high` into rdx, `low` into rax, and `divisor` into + // any register or stack slot. + : "d"(high), "a"(low), [divisor] "rm"(divisor)); + *remainder = rem; + return quotient; +#elif defined(__i386__) + Digit quotient; + Digit rem; + __asm__("divl %[divisor]" + // Outputs: `quotient` will be in eax, `rem` in edx. + : "=a"(quotient), "=d"(rem) + // Inputs: put `high` into edx, `low` into eax, and `divisor` into + // any register or stack slot. + : "d"(high), "a"(low), [divisor] "rm"(divisor)); + *remainder = rem; + return quotient; +#else + static constexpr Digit HalfDigitBase = 1ull << HalfDigitBits; + // Adapted from Warren, Hacker's Delight, p. 152. + unsigned s = DigitLeadingZeroes(divisor); + // If `s` is DigitBits here, it causes an undefined behavior. + // But `s` is never DigitBits since `divisor` is never zero here. 
+ MOZ_ASSERT(s != DigitBits); + divisor <<= s; + + Digit vn1 = divisor >> HalfDigitBits; + Digit vn0 = divisor & HalfDigitMask; + + // `sZeroMask` which is 0 if s == 0 and all 1-bits otherwise. + // + // `s` can be 0. If `s` is 0, performing "low >> (DigitBits - s)" must not + // be done since it causes an undefined behavior since `>> DigitBits` is + // undefined in C++. Quoted from C++ spec, "The type of the result is that of + // the promoted left operand. + // + // The behavior is undefined if the right operand is negative, or greater + // than or equal to the length in bits of the promoted left operand". We + // mask the right operand of the shift by `shiftMask` (`DigitBits - 1`), + // which makes `DigitBits - 0` zero. + // + // This shifting produces a value which covers 0 < `s` <= (DigitBits - 1) + // cases. `s` == DigitBits never happen as we asserted. Since `sZeroMask` + // clears the value in the case of `s` == 0, `s` == 0 case is also covered. + static_assert(sizeof(intptr_t) == sizeof(Digit), + "unexpected size of BigInt::Digit"); + Digit sZeroMask = + static_cast((-static_cast(s)) >> (DigitBits - 1)); + static constexpr unsigned shiftMask = DigitBits - 1; + Digit un32 = + (high << s) | ((low >> ((DigitBits - s) & shiftMask)) & sZeroMask); + + Digit un10 = low << s; + Digit un1 = un10 >> HalfDigitBits; + Digit un0 = un10 & HalfDigitMask; + Digit q1 = un32 / vn1; + Digit rhat = un32 - q1 * vn1; + + while (q1 >= HalfDigitBase || q1 * vn0 > rhat * HalfDigitBase + un1) { + q1--; + rhat += vn1; + if (rhat >= HalfDigitBase) { + break; + } + } + + Digit un21 = un32 * HalfDigitBase + un1 - q1 * divisor; + Digit q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= HalfDigitBase || q0 * vn0 > rhat * HalfDigitBase + un0) { + q0--; + rhat += vn1; + if (rhat >= HalfDigitBase) { + break; + } + } + + *remainder = (un21 * HalfDigitBase + un0 - q0 * divisor) >> s; + return q1 * HalfDigitBase + q0; +#endif +} + +// Multiplies `source` with `factor` and adds 
`summand` to the result.
// `result` and `source` may be the same BigInt for inplace modification.
void BigInt::internalMultiplyAdd(BigInt* source, Digit factor, Digit summand,
                                 unsigned n, BigInt* result) {
  MOZ_ASSERT(source->digitLength() >= n);
  MOZ_ASSERT(result->digitLength() >= n);

  // `carry` holds the addition carry chain (seeded with `summand`); `high`
  // holds the overflow digit from the previous round's multiplication.
  Digit carry = summand;
  Digit high = 0;
  for (unsigned i = 0; i < n; i++) {
    Digit current = source->digit(i);
    Digit newCarry = 0;

    // Compute this round's multiplication.
    Digit newHigh = 0;
    current = digitMul(current, factor, &newHigh);

    // Add last round's carryovers.
    current = digitAdd(current, high, &newCarry);
    current = digitAdd(current, carry, &newCarry);

    // Store result and prepare for next round.
    result->setDigit(i, current);
    carry = newCarry;
    high = newHigh;
  }

  if (result->digitLength() > n) {
    // Spill the final carry/high overflow into the next digit of `result`.
    result->setDigit(n++, carry + high);

    // Current callers don't pass in such large results, but let's be robust.
    while (n < result->digitLength()) {
      result->setDigit(n++, 0);
    }
  } else {
    // No room to spill: the caller promised the product fits in `n` digits.
    MOZ_ASSERT(!(carry + high));
  }
}

// Multiplies `this` with `factor` and adds `summand` to the result.
void BigInt::inplaceMultiplyAdd(Digit factor, Digit summand) {
  internalMultiplyAdd(this, factor, summand, digitLength(), this);
}

// Multiplies `multiplicand` with `multiplier` and adds the result to
// `accumulator`, starting at `accumulatorIndex` for the least-significant
// digit. Callers must ensure that `accumulator`'s digitLength and
// corresponding digit storage is long enough to hold the result.
void BigInt::multiplyAccumulate(BigInt* multiplicand, Digit multiplier,
                                BigInt* accumulator,
                                unsigned accumulatorIndex) {
  MOZ_ASSERT(accumulator->digitLength() >
             multiplicand->digitLength() + accumulatorIndex);
  // Multiplying by zero adds nothing; skip the whole walk.
  if (!multiplier) {
    return;
  }

  Digit carry = 0;
  Digit high = 0;
  for (unsigned i = 0; i < multiplicand->digitLength();
       i++, accumulatorIndex++) {
    Digit acc = accumulator->digit(accumulatorIndex);
    Digit newCarry = 0;

    // Add last round's carryovers.
    acc = digitAdd(acc, high, &newCarry);
    acc = digitAdd(acc, carry, &newCarry);

    // Compute this round's multiplication.
    Digit multiplicandDigit = multiplicand->digit(i);
    Digit low = digitMul(multiplier, multiplicandDigit, &high);
    acc = digitAdd(acc, low, &newCarry);

    // Store result and prepare for next round.
    accumulator->setDigit(accumulatorIndex, acc);
    carry = newCarry;
  }

  // Propagate any remaining carry and multiplication overflow up through
  // the accumulator's higher digits.
  while (carry || high) {
    MOZ_ASSERT(accumulatorIndex < accumulator->digitLength());
    Digit acc = accumulator->digit(accumulatorIndex);
    Digit newCarry = 0;
    acc = digitAdd(acc, high, &newCarry);
    high = 0;
    acc = digitAdd(acc, carry, &newCarry);
    accumulator->setDigit(accumulatorIndex, acc);
    carry = newCarry;
    accumulatorIndex++;
  }
}

// Compares the magnitudes |x| and |y|, returning -1 (|x| < |y|), 0 (equal),
// or 1 (|x| > |y|). Signs are ignored; inputs must have no leading zeroes.
inline int8_t BigInt::absoluteCompare(BigInt* x, BigInt* y) {
  MOZ_ASSERT(!HasLeadingZeroes(x));
  MOZ_ASSERT(!HasLeadingZeroes(y));

  // Sanity checks to catch negative zeroes escaping to the wild.
  MOZ_ASSERT(!x->isNegative() || !x->isZero());
  MOZ_ASSERT(!y->isNegative() || !y->isZero());

  // With no leading zeroes, more digits means strictly larger magnitude.
  int diff = x->digitLength() - y->digitLength();
  if (diff) {
    return diff < 0 ? -1 : 1;
  }

  // Same length: scan from the most-significant digit for the first
  // difference.
  int i = x->digitLength() - 1;
  while (i >= 0 && x->digit(i) == y->digit(i)) {
    i--;
  }

  if (i < 0) {
    return 0;
  }

  return x->digit(i) > y->digit(i) ? 1 : -1;
}

// Computes |x| + |y|, giving the result the sign indicated by
// `resultNegative`. Returns nullptr on allocation failure.
BigInt* BigInt::absoluteAdd(JSContext* cx, HandleBigInt x, HandleBigInt y,
                            bool resultNegative) {
  bool swap = x->digitLength() < y->digitLength();
  // Ensure `left` has at least as many digits as `right`.
  HandleBigInt& left = swap ? y : x;
  HandleBigInt& right = swap ? x : y;

  if (left->isZero()) {
    MOZ_ASSERT(right->isZero());
    return left;
  }

  if (right->isZero()) {
    // Adding zero: reuse `left`, negating only if the requested sign differs.
    return resultNegative == left->isNegative() ? left : neg(cx, left);
  }

  // Fast path for the likely-common case of up to a uint64_t of magnitude.
  if (left->absFitsInUint64()) {
    MOZ_ASSERT(right->absFitsInUint64());

    uint64_t lhs = left->uint64FromAbsNonZero();
    uint64_t rhs = right->uint64FromAbsNonZero();

    uint64_t res = lhs + rhs;
    bool overflow = res < lhs;
    MOZ_ASSERT(res != 0 || overflow);

    // Work out how many digits the (possibly 65-bit) sum needs.
    size_t resultLength = 1;
    if (DigitBits == 32) {
      if (overflow) {
        resultLength = 3;
      } else if (res >> 32) {
        resultLength = 2;
      }
    } else {
      if (overflow) {
        resultLength = 2;
      }
    }
    BigInt* result = createUninitialized(cx, resultLength, resultNegative);
    if (!result) {
      return nullptr;
    }
    result->setDigit(0, res);
    if (DigitBits == 32 && resultLength > 1) {
      result->setDigit(1, res >> 32);
    }
    if (overflow) {
      // The carry out of the 64-bit sum becomes the topmost digit.
      constexpr size_t overflowIndex = DigitBits == 32 ? 2 : 1;
      result->setDigit(overflowIndex, 1);
    }

    MOZ_ASSERT(!HasLeadingZeroes(result));
    return result;
  }

  // General case: schoolbook addition digit by digit, with one extra digit
  // for a possible final carry.
  BigInt* result =
      createUninitialized(cx, left->digitLength() + 1, resultNegative);
  if (!result) {
    return nullptr;
  }
  Digit carry = 0;
  unsigned i = 0;
  for (; i < right->digitLength(); i++) {
    Digit newCarry = 0;
    Digit sum = digitAdd(left->digit(i), right->digit(i), &newCarry);
    sum = digitAdd(sum, carry, &newCarry);
    result->setDigit(i, sum);
    carry = newCarry;
  }

  // Propagate the carry through `left`'s remaining digits.
  for (; i < left->digitLength(); i++) {
    Digit newCarry = 0;
    Digit sum = digitAdd(left->digit(i), carry, &newCarry);
    result->setDigit(i, sum);
    carry = newCarry;
  }

  result->setDigit(i, carry);

  return destructivelyTrimHighZeroDigits(cx, result);
}

// Computes |x| - |y| (requiring |x| > |y|), giving the result the sign
// indicated by `resultNegative`. Returns nullptr on allocation failure.
BigInt* BigInt::absoluteSub(JSContext* cx, HandleBigInt x, HandleBigInt y,
                            bool resultNegative) {
  MOZ_ASSERT(x->digitLength() >= y->digitLength());
  MOZ_ASSERT(absoluteCompare(x, y) > 0);
  MOZ_ASSERT(!x->isZero());

  if (y->isZero()) {
    // Subtracting zero: reuse `x`, negating only if the sign differs.
    return resultNegative == x->isNegative() ? x : neg(cx, x);
  }

  // Fast path for the likely-common case of up to a uint64_t of magnitude.
  if (x->absFitsInUint64()) {
    MOZ_ASSERT(y->absFitsInUint64());

    uint64_t lhs = x->uint64FromAbsNonZero();
    uint64_t rhs = y->uint64FromAbsNonZero();
    MOZ_ASSERT(lhs > rhs);

    uint64_t res = lhs - rhs;
    MOZ_ASSERT(res != 0);

    return createFromNonZeroRawUint64(cx, res, resultNegative);
  }

  // General case: schoolbook subtraction digit by digit with borrow.
  BigInt* result = createUninitialized(cx, x->digitLength(), resultNegative);
  if (!result) {
    return nullptr;
  }
  Digit borrow = 0;
  unsigned i = 0;
  for (; i < y->digitLength(); i++) {
    Digit newBorrow = 0;
    Digit difference = digitSub(x->digit(i), y->digit(i), &newBorrow);
    difference = digitSub(difference, borrow, &newBorrow);
    result->setDigit(i, difference);
    borrow = newBorrow;
  }

  // Propagate the borrow through `x`'s remaining digits.
  for (; i < x->digitLength(); i++) {
    Digit newBorrow = 0;
    Digit difference = digitSub(x->digit(i), borrow, &newBorrow);
    result->setDigit(i, difference);
    borrow = newBorrow;
  }

  // |x| > |y| guarantees the subtraction cannot underflow overall.
  MOZ_ASSERT(!borrow);
  return destructivelyTrimHighZeroDigits(cx, result);
}

// Divides `x` by `divisor`, returning the result in `quotient` and `remainder`.
// Mathematically, the contract is:
//
//   quotient = (x - remainder) / divisor, with 0 <= remainder < divisor.
//
// If `quotient` is an empty handle, an appropriately sized BigInt will be
// allocated for it; otherwise the caller must ensure that it is big enough.
// `quotient` can be the same as `x` for an in-place division. `quotient` can
// also be `Nothing()` if the caller is only interested in the remainder.
//
// This function returns false if `quotient` is an empty handle, but allocating
// the quotient failed. Otherwise it returns true, indicating success.
bool BigInt::absoluteDivWithDigitDivisor(
    JSContext* cx, HandleBigInt x, Digit divisor,
    const Maybe<MutableHandleBigInt>& quotient, Digit* remainder,
    bool quotientNegative) {
  MOZ_ASSERT(divisor);

  MOZ_ASSERT(!x->isZero());
  *remainder = 0;
  if (divisor == 1) {
    // Dividing by one: the quotient is `x` itself (with the requested sign)
    // and the remainder is zero.
    if (quotient) {
      BigInt* q;
      if (x->isNegative() == quotientNegative) {
        q = x;
      } else {
        q = neg(cx, x);
        if (!q) {
          return false;
        }
      }
      quotient.value().set(q);
    }
    return true;
  }

  unsigned length = x->digitLength();
  if (quotient) {
    // Allocate a quotient only if the caller didn't supply one.
    if (!quotient.value()) {
      BigInt* q = createUninitialized(cx, length, quotientNegative);
      if (!q) {
        return false;
      }
      quotient.value().set(q);
    }

    // Schoolbook long division, most-significant digit first; the running
    // remainder is fed back in as the high word of each digitDiv.
    for (int i = length - 1; i >= 0; i--) {
      Digit q = digitDiv(*remainder, x->digit(i), divisor, remainder);
      quotient.value()->setDigit(i, q);
    }
  } else {
    // Remainder-only mode: same walk, discarding the quotient digits.
    for (int i = length - 1; i >= 0; i--) {
      digitDiv(*remainder, x->digit(i), divisor, remainder);
    }
  }

  return true;
}

// Adds `summand` onto `this`, starting with `summand`'s 0th digit
// at `this`'s `startIndex`'th digit. Returns the "carry" (0 or 1).
BigInt::Digit BigInt::absoluteInplaceAdd(BigInt* summand, unsigned startIndex) {
  Digit carry = 0;
  unsigned n = summand->digitLength();
  MOZ_ASSERT(digitLength() > startIndex,
             "must start adding at an in-range digit");
  MOZ_ASSERT(digitLength() - startIndex >= n,
             "digits being added to must not extend above the digits in "
             "this (except for the returned carry digit)");
  for (unsigned i = 0; i < n; i++) {
    Digit newCarry = 0;
    Digit sum = digitAdd(digit(startIndex + i), summand->digit(i), &newCarry);
    sum = digitAdd(sum, carry, &newCarry);
    setDigit(startIndex + i, sum);
    carry = newCarry;
  }

  return carry;
}

// Subtracts `subtrahend` from this, starting with `subtrahend`'s 0th digit
// at `this`'s `startIndex`-th digit. Returns the "borrow" (0 or 1).
BigInt::Digit BigInt::absoluteInplaceSub(BigInt* subtrahend,
                                         unsigned startIndex) {
  Digit borrow = 0;
  unsigned n = subtrahend->digitLength();
  MOZ_ASSERT(digitLength() > startIndex,
             "must start subtracting from an in-range digit");
  MOZ_ASSERT(digitLength() - startIndex >= n,
             "digits being subtracted from must not extend above the "
             "digits in this (except for the returned borrow digit)");
  for (unsigned i = 0; i < n; i++) {
    Digit newBorrow = 0;
    Digit difference =
        digitSub(digit(startIndex + i), subtrahend->digit(i), &newBorrow);
    difference = digitSub(difference, borrow, &newBorrow);
    setDigit(startIndex + i, difference);
    borrow = newBorrow;
  }

  return borrow;
}

// Returns whether (factor1 * factor2) > (high << kDigitBits) + low.
inline bool BigInt::productGreaterThan(Digit factor1, Digit factor2, Digit high,
                                       Digit low) {
  Digit resultHigh;
  Digit resultLow = digitMul(factor1, factor2, &resultHigh);
  return resultHigh > high || (resultHigh == high && resultLow > low);
}

// Shifts `this` right by `shift` bits in place. The bits shifted out must
// all be zero (i.e. this only removes known-zero low bits).
void BigInt::inplaceRightShiftLowZeroBits(unsigned shift) {
  MOZ_ASSERT(shift < DigitBits);
  MOZ_ASSERT(!(digit(0) & ((static_cast<Digit>(1) << shift) - 1)),
             "should only be shifting away zeroes");

  if (!shift) {
    return;
  }

  // Each digit receives its own high bits shifted down, plus the low bits of
  // the next digit up (`carry`).
  Digit carry = digit(0) >> shift;
  unsigned last = digitLength() - 1;
  for (unsigned i = 0; i < last; i++) {
    Digit d = digit(i + 1);
    setDigit(i, (d << (DigitBits - shift)) | carry);
    carry = d >> shift;
  }
  setDigit(last, carry);
}

// Always copies the input, even when `shift` == 0.
BigInt* BigInt::absoluteLeftShiftAlwaysCopy(JSContext* cx, HandleBigInt x,
                                            unsigned shift,
                                            LeftShiftMode mode) {
  MOZ_ASSERT(shift < DigitBits);
  MOZ_ASSERT(!x->isZero());

  unsigned n = x->digitLength();
  unsigned resultLength = mode == LeftShiftMode::AlwaysAddOneDigit ? n + 1 : n;
  BigInt* result = createUninitialized(cx, resultLength, x->isNegative());
  if (!result) {
    return nullptr;
  }

  if (!shift) {
    // Zero shift: straight digit copy (plus a zero top digit if requested).
    for (unsigned i = 0; i < n; i++) {
      result->setDigit(i, x->digit(i));
    }
    if (mode == LeftShiftMode::AlwaysAddOneDigit) {
      result->setDigit(n, 0);
    }

    return result;
  }

  // Shift each digit up, carrying its spilled high bits into the next digit.
  Digit carry = 0;
  for (unsigned i = 0; i < n; i++) {
    Digit d = x->digit(i);
    result->setDigit(i, (d << shift) | carry);
    carry = d >> (DigitBits - shift);
  }

  if (mode == LeftShiftMode::AlwaysAddOneDigit) {
    result->setDigit(n, carry);
  } else {
    // SameSizeResult callers promise the shift cannot overflow the top digit.
    MOZ_ASSERT(mode == LeftShiftMode::SameSizeResult);
    MOZ_ASSERT(!carry);
  }

  return result;
}

// Divides `dividend` by `divisor`, returning the result in `quotient` and
// `remainder`. Mathematically, the contract is:
//
//   quotient = (dividend - remainder) / divisor, with 0 <= remainder < divisor.
//
// Both `quotient` and `remainder` are optional, for callers that are only
// interested in one of them. See Knuth, Volume 2, section 4.3.1, Algorithm D.
// Also see the overview of the algorithm by Jan Marthedal Rasmussen over at
// https://janmr.com/blog/2014/04/basic-multiple-precision-long-division/.
bool BigInt::absoluteDivWithBigIntDivisor(
    JSContext* cx, HandleBigInt dividend, HandleBigInt divisor,
    const Maybe<MutableHandleBigInt>& quotient,
    const Maybe<MutableHandleBigInt>& remainder, bool isNegative) {
  MOZ_ASSERT(divisor->digitLength() >= 2);
  MOZ_ASSERT(dividend->digitLength() >= divisor->digitLength());

  // Any early error return is detectable by checking the quotient and/or
  // remainder output values.
  MOZ_ASSERT(!quotient || !quotient.value());
  MOZ_ASSERT(!remainder || !remainder.value());

  // The unusual variable names inside this function are consistent with
  // Knuth's book, as well as with Go's implementation of this algorithm.
  // Maintaining this consistency is probably more useful than trying to
  // come up with more descriptive names for them.
  const unsigned n = divisor->digitLength();
  const unsigned m = dividend->digitLength() - n;

  // The quotient to be computed.
  RootedBigInt q(cx);
  if (quotient) {
    q = createUninitialized(cx, m + 1, isNegative);
    if (!q) {
      return false;
    }
  }

  // In each iteration, `qhatv` holds `divisor` * `current quotient digit`.
  // "v" is the book's name for `divisor`, `qhat` the current quotient digit.
  RootedBigInt qhatv(cx, createUninitialized(cx, n + 1, isNegative));
  if (!qhatv) {
    return false;
  }

  // D1.
  // Left-shift inputs so that the divisor's MSB is set. This is necessary to
  // prevent the digit-wise divisions (see digitDiv call below) from
  // overflowing (they take a two digits wide input, and return a one digit
  // result).
  Digit lastDigit = divisor->digit(n - 1);
  unsigned shift = DigitLeadingZeroes(lastDigit);

  RootedBigInt shiftedDivisor(cx);
  if (shift > 0) {
    shiftedDivisor = absoluteLeftShiftAlwaysCopy(cx, divisor, shift,
                                                 LeftShiftMode::SameSizeResult);
    if (!shiftedDivisor) {
      return false;
    }
  } else {
    shiftedDivisor = divisor;
  }

  // Holds the (continuously updated) remaining part of the dividend, which
  // eventually becomes the remainder.
  RootedBigInt u(cx,
                 absoluteLeftShiftAlwaysCopy(cx, dividend, shift,
                                             LeftShiftMode::AlwaysAddOneDigit));
  if (!u) {
    return false;
  }

  // D2.
  // Iterate over the dividend's digit (like the "grade school" algorithm).
  // `vn1` is the divisor's most significant digit.
  Digit vn1 = shiftedDivisor->digit(n - 1);
  for (int j = m; j >= 0; j--) {
    // D3.
    // Estimate the current iteration's quotient digit (see Knuth for details).
    // `qhat` is the current quotient digit.
    Digit qhat = std::numeric_limits<Digit>::max();

    // `ujn` is the dividend's most significant remaining digit.
    Digit ujn = u->digit(j + n);
    if (ujn != vn1) {
      // `rhat` is the current iteration's remainder.
      Digit rhat = 0;
      // Estimate the current quotient digit by dividing the most significant
      // digits of dividend and divisor. The result will not be too small,
      // but could be a bit too large.
      qhat = digitDiv(ujn, u->digit(j + n - 1), vn1, &rhat);

      // Decrement the quotient estimate as needed by looking at the next
      // digit, i.e. by testing whether
      // qhat * v_{n-2} > (rhat << DigitBits) + u_{j+n-2}.
      Digit vn2 = shiftedDivisor->digit(n - 2);
      Digit ujn2 = u->digit(j + n - 2);
      while (productGreaterThan(qhat, vn2, rhat, ujn2)) {
        qhat--;
        Digit prevRhat = rhat;
        rhat += vn1;
        // v[n-1] >= 0, so this tests for overflow.
        if (rhat < prevRhat) {
          break;
        }
      }
    }

    // D4.
    // Multiply the divisor with the current quotient digit, and subtract
    // it from the dividend. If there was "borrow", then the quotient digit
    // was one too high, so we must correct it and undo one subtraction of
    // the (shifted) divisor.
    internalMultiplyAdd(shiftedDivisor, qhat, 0, n, qhatv);
    Digit c = u->absoluteInplaceSub(qhatv, j);
    if (c) {
      c = u->absoluteInplaceAdd(shiftedDivisor, j);
      u->setDigit(j + n, u->digit(j + n) + c);
      qhat--;
    }

    if (quotient) {
      q->setDigit(j, qhat);
    }
  }

  if (quotient) {
    // NOTE(review): the trimmed result `bi` is only checked for OOM; `q` is
    // what gets stored. These are the same object unless trimming allocated
    // a fresh zero — confirm that is the intended behavior here.
    BigInt* bi = destructivelyTrimHighZeroDigits(cx, q);
    if (!bi) {
      return false;
    }
    quotient.value().set(q);
  }

  if (remainder) {
    // Undo the D1 normalization shift to recover the true remainder.
    u->inplaceRightShiftLowZeroBits(shift);
    remainder.value().set(u);
  }

  return true;
}

// Helper for Absolute{And,AndNot,Or,Xor}.
// Performs the given binary `op` on digit pairs of `x` and `y`; when the
// end of the shorter of the two is reached, `kind` configures how
// remaining digits are handled.
// Example:
//       y:             [ y2 ][ y1 ][ y0 ]
//       x:       [ x3 ][ x2 ][ x1 ][ x0 ]
//                   |     |     |     |
//                (Fill)  (op) (op)  (op)
//                   |     |     |     |
//                   v     v     v     v
// result: [  0 ][ x3 ][ r2 ][ r1 ][ r0 ]
template <BigInt::BitwiseOpKind kind, typename BitwiseOp>
inline BigInt* BigInt::absoluteBitwiseOp(JSContext* cx, HandleBigInt x,
                                         HandleBigInt y, BitwiseOp&& op) {
  unsigned xLength = x->digitLength();
  unsigned yLength = y->digitLength();
  unsigned numPairs = std::min(xLength, yLength);
  unsigned resultLength;
  if (kind == BitwiseOpKind::SymmetricTrim) {
    resultLength = numPairs;
  } else if (kind == BitwiseOpKind::SymmetricFill) {
    resultLength = std::max(xLength, yLength);
  } else {
    MOZ_ASSERT(kind == BitwiseOpKind::AsymmetricFill);
    resultLength = xLength;
  }
  bool resultNegative = false;

  BigInt* result = createUninitialized(cx, resultLength, resultNegative);
  if (!result) {
    return nullptr;
  }

  // Apply `op` to every digit pair shared by both operands.
  unsigned i = 0;
  for (; i < numPairs; i++) {
    result->setDigit(i, op(x->digit(i), y->digit(i)));
  }

  if (kind != BitwiseOpKind::SymmetricTrim) {
    // Fill the rest of the result by copying from `x` (AsymmetricFill) or
    // from whichever operand still has digits left (SymmetricFill).
    BigInt* source = kind == BitwiseOpKind::AsymmetricFill ? x
                     : xLength == i                        ? y
                                                           : x;
    for (; i < resultLength; i++) {
      result->setDigit(i, source->digit(i));
    }
  }

  MOZ_ASSERT(i == resultLength);

  return destructivelyTrimHighZeroDigits(cx, result);
}

// |x| & |y|: only the shared digits can be nonzero, so trim to them.
BigInt* BigInt::absoluteAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) {
  return absoluteBitwiseOp<BitwiseOpKind::SymmetricTrim>(cx, x, y,
                                                         std::bit_and<Digit>());
}

// |x| | |y|: the longer operand's extra digits pass through unchanged.
BigInt* BigInt::absoluteOr(JSContext* cx, HandleBigInt x, HandleBigInt y) {
  return absoluteBitwiseOp<BitwiseOpKind::SymmetricFill>(cx, x, y,
                                                         std::bit_or<Digit>());
}

// |x| & ~|y|: the result is at most as long as `x`; `x`'s extra digits
// pass through unchanged.
BigInt* BigInt::absoluteAndNot(JSContext* cx, HandleBigInt x, HandleBigInt y) {
  auto digitOperation = [](Digit a, Digit b) { return a & ~b; };
  return absoluteBitwiseOp<BitwiseOpKind::AsymmetricFill>(cx, x, y,
                                                          digitOperation);
}

// |x| ^ |y|: the longer operand's extra digits pass through unchanged.
BigInt* BigInt::absoluteXor(JSContext* cx, HandleBigInt x, HandleBigInt y) {
  return absoluteBitwiseOp<BitwiseOpKind::SymmetricFill>(cx, x, y,
                                                         std::bit_xor<Digit>());
}

// Computes |x| + 1, giving the result the sign indicated by `resultNegative`.
BigInt* BigInt::absoluteAddOne(JSContext* cx, HandleBigInt x,
                               bool resultNegative) {
  unsigned inputLength = x->digitLength();
  // The addition will overflow into a new digit if all existing digits are
  // at maximum.
  bool willOverflow = true;
  for (unsigned i = 0; i < inputLength; i++) {
    if (std::numeric_limits<Digit>::max() != x->digit(i)) {
      willOverflow = false;
      break;
    }
  }

  unsigned resultLength = inputLength + willOverflow;
  BigInt* result = createUninitialized(cx, resultLength, resultNegative);
  if (!result) {
    return nullptr;
  }

  // Ripple the +1 through the digits as a carry.
  Digit carry = 1;
  for (unsigned i = 0; i < inputLength; i++) {
    Digit newCarry = 0;
    result->setDigit(i, digitAdd(x->digit(i), carry, &newCarry));
    carry = newCarry;
  }
  if (resultLength > inputLength) {
    MOZ_ASSERT(carry == 1);
    result->setDigit(inputLength, 1);
  } else {
    MOZ_ASSERT(!carry);
  }

  return destructivelyTrimHighZeroDigits(cx, result);
}

// Computes |x| - 1 (requiring x != 0), giving the result the sign indicated
// by `resultNegative`.
BigInt* BigInt::absoluteSubOne(JSContext* cx, HandleBigInt x,
                               bool resultNegative) {
  MOZ_ASSERT(!x->isZero());

  unsigned length = x->digitLength();

  if (length == 1) {
    Digit d = x->digit(0);
    if (d == 1) {
      // Ignore resultNegative.
      return zero(cx);
    }
    return createFromDigit(cx, d - 1, resultNegative);
  }

  BigInt* result = createUninitialized(cx, length, resultNegative);
  if (!result) {
    return nullptr;
  }

  // Ripple the -1 through the digits as a borrow.
  Digit borrow = 1;
  for (unsigned i = 0; i < length; i++) {
    Digit newBorrow = 0;
    result->setDigit(i, digitSub(x->digit(i), borrow, &newBorrow));
    borrow = newBorrow;
  }
  MOZ_ASSERT(!borrow);

  return destructivelyTrimHighZeroDigits(cx, result);
}

// Computes x + 1, dispatching on sign to the magnitude helpers.
BigInt* BigInt::inc(JSContext* cx, HandleBigInt x) {
  if (x->isZero()) {
    return one(cx);
  }

  bool isNegative = x->isNegative();
  if (isNegative) {
    // -|x| + 1 == -(|x| - 1).
    return absoluteSubOne(cx, x, isNegative);
  }

  return absoluteAddOne(cx, x, isNegative);
}

// Computes x - 1, dispatching on sign to the magnitude helpers.
BigInt* BigInt::dec(JSContext* cx, HandleBigInt x) {
  if (x->isZero()) {
    return negativeOne(cx);
  }

  bool isNegative = x->isNegative();
  if (isNegative) {
    // -|x| - 1 == -(|x| + 1).
    return absoluteAddOne(cx, x, isNegative);
  }

  return absoluteSubOne(cx, x, isNegative);
}

// Lookup table for the maximum number of bits required per character of a
// base-N string representation of a number. To increase accuracy, the array
// value is the actual value multiplied by 32. To generate this table:
// for (var i = 0; i <= 36; i++) { print(Math.ceil(Math.log2(i) * 32) + ","); }
static constexpr uint8_t maxBitsPerCharTable[] = {
    0,   0,   32,  51,  64,  75,  83,  90,  96,  // 0..8
    102, 107, 111, 115, 119, 122, 126, 128,      // 9..16
    131, 134, 136, 139, 141, 143, 145, 147,      // 17..24
    149, 151, 153, 154, 156, 158, 159, 160,      // 25..32
    162, 163, 165, 166,                          // 33..36
};

static constexpr unsigned bitsPerCharTableShift = 5;
static constexpr size_t bitsPerCharTableMultiplier = 1u
                                                     << bitsPerCharTableShift;
static constexpr char radixDigits[] = "0123456789abcdefghijklmnopqrstuvwxyz";

// Ceiling division for nonzero numerators, written to avoid overflow in
// `numerator + denominator - 1`.
static inline uint64_t CeilDiv(uint64_t numerator, uint64_t denominator) {
  MOZ_ASSERT(numerator != 0);
  return 1 + (numerator - 1) / denominator;
};

// Compute (an overapproximation of) the length of the string representation of
// a BigInt.  In base B an X-digit number has maximum value:
//
//   B**X - 1
//
// We're trying to find N for an N-digit number in base |radix| fully
// representing a |bitLength|-digit number in base 2, so we have:
//
//   radix**N - 1 ≥ 2**bitLength - 1
//   radix**N ≥ 2**bitLength
//   N ≥ log2(2**bitLength) / log2(radix)
//   N ≥ bitLength / log2(radix)
//
// so the smallest N is:
//
//   N = ⌈bitLength / log2(radix)⌉
//
// We want to avoid floating-point computations and precompute the logarithm, so
// we multiply both sides of the division by |bitsPerCharTableMultiplier|:
//
//   N = ⌈(bPCTM * bitLength) / (bPCTM * log2(radix))⌉
//
// and then because |maxBitsPerChar| representing the denominator may have been
// rounded *up* -- which could produce an overall under-computation -- we reduce
// by one to undo any rounding and conservatively compute:
//
//   N ≥ ⌈(bPCTM * bitLength) / (maxBitsPerChar - 1)⌉
//
size_t BigInt::calculateMaximumCharactersRequired(HandleBigInt x,
                                                  unsigned radix) {
  MOZ_ASSERT(!x->isZero());
  MOZ_ASSERT(radix >= 2 && radix <= 36);

  size_t length = x->digitLength();
  Digit lastDigit
 = x->digit(length - 1);
  // The bit length is the full digit count minus the unused high bits of the
  // most-significant digit.
  size_t bitLength = length * DigitBits - DigitLeadingZeroes(lastDigit);

  uint8_t maxBitsPerChar = maxBitsPerCharTable[radix];
  uint64_t maximumCharactersRequired =
      CeilDiv(static_cast<uint64_t>(bitsPerCharTableMultiplier) * bitLength,
              maxBitsPerChar - 1);
  // Reserve one extra character for a leading '-' on negative values.
  maximumCharactersRequired += x->isNegative();

  return AssertedCast<size_t>(maximumCharactersRequired);
}

// Converts `x` to a string in a power-of-two radix by slicing its bits into
// fixed-width groups — no division needed. `x` must be nonzero.
template <AllowGC allowGC>
JSLinearString* BigInt::toStringBasePowerOfTwo(JSContext* cx, HandleBigInt x,
                                               unsigned radix) {
  MOZ_ASSERT(mozilla::IsPowerOfTwo(radix));
  MOZ_ASSERT(radix >= 2 && radix <= 32);
  MOZ_ASSERT(!x->isZero());

  const unsigned length = x->digitLength();
  const bool sign = x->isNegative();
  // A power-of-two radix encodes exactly log2(radix) bits per character.
  const unsigned bitsPerChar = mozilla::CountTrailingZeroes32(radix);
  const unsigned charMask = radix - 1;
  // Compute the length of the resulting string: divide the bit length of the
  // BigInt by the number of bits representable per character (rounding up).
  const Digit msd = x->digit(length - 1);

  const size_t bitLength = length * DigitBits - DigitLeadingZeroes(msd);
  const size_t charsRequired = CeilDiv(bitLength, bitsPerChar) + sign;

  if (charsRequired > JSString::MAX_LENGTH) {
    if constexpr (allowGC) {
      ReportAllocationOverflow(cx);
    }
    return nullptr;
  }

  auto resultChars = cx->make_pod_array<char>(charsRequired);
  if (!resultChars) {
    if constexpr (!allowGC) {
      cx->recoverFromOutOfMemory();
    }
    return nullptr;
  }

  Digit digit = 0;
  // Keeps track of how many unprocessed bits there are in |digit|.
  unsigned availableBits = 0;
  size_t pos = charsRequired;
  // Emit characters least-significant first, walking all digits below the
  // most-significant one.
  for (unsigned i = 0; i < length - 1; i++) {
    Digit newDigit = x->digit(i);
    // Take any leftover bits from the last iteration into account.
    unsigned current = (digit | (newDigit << availableBits)) & charMask;
    MOZ_ASSERT(pos);
    resultChars[--pos] = radixDigits[current];
    unsigned consumedBits = bitsPerChar - availableBits;
    digit = newDigit >> consumedBits;
    availableBits = DigitBits - consumedBits;
    while (availableBits >= bitsPerChar) {
      MOZ_ASSERT(pos);
      resultChars[--pos] = radixDigits[digit & charMask];
      digit >>= bitsPerChar;
      availableBits -= bitsPerChar;
    }
  }

  // Write out the character containing the lowest-order bit of |msd|.
  //
  // This character may include leftover bits from the Digit below |msd|.  For
  // example, if |x === 2n**64n| and |radix == 32|: the preceding loop writes
  // twelve zeroes for low-order bits 0-59 in |x->digit(0)| (and |x->digit(1)|
  // on 32-bit); then the highest 4 bits of of |x->digit(0)| (or |x->digit(1)|
  // on 32-bit) and bit 0 of |x->digit(1)| (|x->digit(2)| on 32-bit) will
  // comprise the |current == 0b1'0000| computed below for the high-order 'g'
  // character.
  unsigned current = (digit | (msd << availableBits)) & charMask;
  MOZ_ASSERT(pos);
  resultChars[--pos] = radixDigits[current];

  // Write out remaining characters represented by |msd|.  (There may be none,
  // as in the example above.)
  digit = msd >> (bitsPerChar - availableBits);
  while (digit != 0) {
    MOZ_ASSERT(pos);
    resultChars[--pos] = radixDigits[digit & charMask];
    digit >>= bitsPerChar;
  }

  if (sign) {
    MOZ_ASSERT(pos);
    resultChars[--pos] = '-';
  }

  MOZ_ASSERT(pos == 0);
  return NewStringCopyN<allowGC>(cx, resultChars.get(), charsRequired);
}

// Converts a single-digit magnitude to a base-10 string. The zero case is
// handled by the caller (toString).
template <AllowGC allowGC>
JSLinearString* BigInt::toStringSingleDigitBaseTen(JSContext* cx, Digit digit,
                                                   bool isNegative) {
  // Values that fit in int32 reuse the cached Int32ToString path.
  if (digit <= Digit(INT32_MAX)) {
    int32_t val = AssertedCast<int32_t>(digit);
    return Int32ToString<allowGC>(cx, isNegative ? -val : val);
  }

  MOZ_ASSERT(digit != 0, "zero case should have been handled in toString");

  // One byte for a possible '-', plus the maximum decimal digits of a Digit.
  constexpr size_t maxLength = 1 + (std::numeric_limits<Digit>::digits10 + 1);
  static_assert(maxLength == 11 || maxLength == 21,
                "unexpected decimal string length");

  char resultChars[maxLength];
  size_t writePos = maxLength;

  // Emit decimal digits least-significant first, back to front.
  while (digit != 0) {
    MOZ_ASSERT(writePos > 0);
    resultChars[--writePos] = radixDigits[digit % 10];
    digit /= 10;
  }
  MOZ_ASSERT(writePos < maxLength);
  MOZ_ASSERT(resultChars[writePos] != '0');

  if (isNegative) {
    MOZ_ASSERT(writePos > 0);
    resultChars[--writePos] = '-';
  }

  MOZ_ASSERT(writePos < maxLength);
  return NewStringCopyN<allowGC>(cx, resultChars + writePos,
                                 maxLength - writePos);
}

// Largest power of `radix` that fits in one Digit.
static constexpr BigInt::Digit MaxPowerInDigit(uint8_t radix) {
  BigInt::Digit result = 1;
  while (result < BigInt::Digit(-1) / radix) {
    result *= radix;
  }
  return result;
}

// Exponent of that largest power, i.e. how many base-`radix` characters one
// Digit-sized chunk can carry.
static constexpr uint8_t MaxExponentInDigit(uint8_t radix) {
  uint8_t exp = 0;
  BigInt::Digit result = 1;
  while (result < BigInt::Digit(-1) / radix) {
    result *= radix;
    exp += 1;
  }
  return exp;
}

// Per-radix constants used by toStringGeneric's chunked division.
struct RadixInfo {
  BigInt::Digit maxPowerInDigit;
  uint8_t maxExponentInDigit;

  constexpr RadixInfo(BigInt::Digit maxPower, uint8_t maxExponent)
      : maxPowerInDigit(maxPower), maxExponentInDigit(maxExponent) {}

  explicit constexpr RadixInfo(uint8_t radix)
      : RadixInfo(MaxPowerInDigit(radix), MaxExponentInDigit(radix)) {}
};

// Indexed by radix; entries 0 and 1 are unused placeholders.
static constexpr const RadixInfo toStringInfo[37] = {
    {0, 0},        {0, 0},        RadixInfo(2),  RadixInfo(3),  RadixInfo(4),
    RadixInfo(5),  RadixInfo(6),  RadixInfo(7),  RadixInfo(8),  RadixInfo(9),
    RadixInfo(10), RadixInfo(11), RadixInfo(12), RadixInfo(13), RadixInfo(14),
    RadixInfo(15), RadixInfo(16), RadixInfo(17), RadixInfo(18), RadixInfo(19),
    RadixInfo(20), RadixInfo(21), RadixInfo(22), RadixInfo(23), RadixInfo(24),
    RadixInfo(25), RadixInfo(26), RadixInfo(27), RadixInfo(28), RadixInfo(29),
    RadixInfo(30), RadixInfo(31), RadixInfo(32), RadixInfo(33), RadixInfo(34),
    RadixInfo(35), RadixInfo(36),
};

// Converts a nonzero `x` to a string in an arbitrary radix by repeatedly
// dividing off Digit-sized chunks (see toStringInfo) and formatting each
// chunk with ordinary integer arithmetic.
JSLinearString* BigInt::toStringGeneric(JSContext* cx, HandleBigInt x,
                                        unsigned radix) {
  MOZ_ASSERT(radix >= 2 && radix <= 36);
  MOZ_ASSERT(!x->isZero());

  size_t maximumCharactersRequired =
      calculateMaximumCharactersRequired(x, radix);
  if (maximumCharactersRequired > JSString::MAX_LENGTH) {
    ReportAllocationOverflow(cx);
    return nullptr;
  }

  UniqueChars resultString(js_pod_malloc<char>(maximumCharactersRequired));
  if (!resultString) {
    ReportOutOfMemory(cx);
    return nullptr;
  }

  size_t writePos = maximumCharactersRequired;
  unsigned length = x->digitLength();
  Digit lastDigit;
  if (length == 1) {
    lastDigit = x->digit(0);
  } else {
    unsigned chunkChars = toStringInfo[radix].maxExponentInDigit;
    Digit chunkDivisor = toStringInfo[radix].maxPowerInDigit;

    unsigned nonZeroDigit = length - 1;
    MOZ_ASSERT(x->digit(nonZeroDigit) != 0);

    // `rest` holds the part of the BigInt that we haven't looked at yet.
    // Not to be confused with "remainder"!
    RootedBigInt rest(cx);

    // In the first round, divide the input, allocating a new BigInt for
    // the result == rest; from then on divide the rest in-place.
    //
    // FIXME: absoluteDivWithDigitDivisor doesn't
    // destructivelyTrimHighZeroDigits for in-place divisions, leading to
    // worse constant factors.  See
    // https://bugzilla.mozilla.org/show_bug.cgi?id=1510213.
+ RootedBigInt dividend(cx, x); + do { + Digit chunk; + if (!absoluteDivWithDigitDivisor(cx, dividend, chunkDivisor, Some(&rest), + &chunk, dividend->isNegative())) { + return nullptr; + } + + dividend = rest; + for (unsigned i = 0; i < chunkChars; i++) { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = radixDigits[chunk % radix]; + chunk /= radix; + } + MOZ_ASSERT(!chunk); + + if (!rest->digit(nonZeroDigit)) { + nonZeroDigit--; + } + + MOZ_ASSERT(rest->digit(nonZeroDigit) != 0, + "division by a single digit can't remove more than one " + "digit from a number"); + } while (nonZeroDigit > 0); + + lastDigit = rest->digit(0); + } + + do { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = radixDigits[lastDigit % radix]; + lastDigit /= radix; + } while (lastDigit > 0); + MOZ_ASSERT(writePos < maximumCharactersRequired); + MOZ_ASSERT(maximumCharactersRequired - writePos <= + static_cast(maximumCharactersRequired)); + + // Remove leading zeroes. + while (writePos + 1 < maximumCharactersRequired && + resultString[writePos] == '0') { + writePos++; + } + + if (x->isNegative()) { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = '-'; + } + + MOZ_ASSERT(writePos < maximumCharactersRequired); + // Would be better to somehow adopt resultString directly. 
+ return NewStringCopyN(cx, resultString.get() + writePos, + maximumCharactersRequired - writePos); +} + +static void FreeDigits(JSContext* cx, BigInt* bi, BigInt::Digit* digits, + size_t nbytes) { + MOZ_ASSERT(cx->isMainThreadContext()); + + if (bi->isTenured()) { + MOZ_ASSERT(!cx->nursery().isInside(digits)); + js_free(digits); + } else { + cx->nursery().freeBuffer(digits, nbytes); + } +} + +BigInt* BigInt::destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x) { + if (x->isZero()) { + MOZ_ASSERT(!x->isNegative()); + return x; + } + MOZ_ASSERT(x->digitLength()); + + int nonZeroIndex = x->digitLength() - 1; + while (nonZeroIndex >= 0 && x->digit(nonZeroIndex) == 0) { + nonZeroIndex--; + } + + if (nonZeroIndex < 0) { + return zero(cx); + } + + if (nonZeroIndex == static_cast(x->digitLength() - 1)) { + return x; + } + + unsigned newLength = nonZeroIndex + 1; + + if (newLength > InlineDigitsLength) { + MOZ_ASSERT(x->hasHeapDigits()); + + size_t oldLength = x->digitLength(); + Digit* newdigits = + js::ReallocateBigIntDigits(cx, x, x->heapDigits_, oldLength, newLength); + if (!newdigits) { + return nullptr; + } + x->heapDigits_ = newdigits; + + RemoveCellMemory(x, oldLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + AddCellMemory(x, newLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + } else { + if (x->hasHeapDigits()) { + Digit digits[InlineDigitsLength]; + std::copy_n(x->heapDigits_, InlineDigitsLength, digits); + + size_t nbytes = x->digitLength() * sizeof(Digit); + FreeDigits(cx, x, x->heapDigits_, nbytes); + RemoveCellMemory(x, nbytes, js::MemoryUse::BigIntDigits); + + std::copy_n(digits, InlineDigitsLength, x->inlineDigits_); + } + } + + x->setLengthAndFlags(newLength, x->isNegative() ? 
SignBit : 0);
+
+  return x;
+}
+
+// The maximum value `radix**charCount - 1` must be represented as a max number
+// `2**(N * DigitBits) - 1` for `N` digits, so
+//
+//   2**(N * DigitBits) - 1 ≥ radix**charcount - 1
+//   2**(N * DigitBits) ≥ radix**charcount
+//   N * DigitBits ≥ log2(radix**charcount)
+//   N * DigitBits ≥ charcount * log2(radix)
+//   N ≥ ⌈charcount * log2(radix) / DigitBits⌉ (conservatively)
+//
+// or in the code's terms (all numbers promoted to exact mathematical values),
+//
+//   N ≥ ⌈charcount * bitsPerChar / (DigitBits * bitsPerCharTableMultiplier)⌉
+//
+// Note that `N` is computed even more conservatively here because `bitsPerChar`
+// is rounded up.
+bool BigInt::calculateMaximumDigitsRequired(JSContext* cx, uint8_t radix,
+                                            size_t charcount, size_t* result) {
+  MOZ_ASSERT(2 <= radix && radix <= 36);
+
+  uint8_t bitsPerChar = maxBitsPerCharTable[radix];
+
+  MOZ_ASSERT(charcount > 0);
+  MOZ_ASSERT(charcount <= std::numeric_limits<uint64_t>::max() / bitsPerChar);
+  static_assert(
+      MaxDigitLength < std::numeric_limits<size_t>::max(),
+      "can't safely cast calculateMaximumDigitsRequired result to size_t");
+
+  uint64_t n = CeilDiv(static_cast<uint64_t>(charcount) * bitsPerChar,
+                       DigitBits * bitsPerCharTableMultiplier);
+  if (n > MaxDigitLength) {
+    ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+    return false;
+  }
+
+  *result = n;
+  return true;
+}
+
+template <typename CharT>
+BigInt* BigInt::parseLiteralDigits(JSContext* cx,
+                                   const Range<const CharT> chars,
+                                   unsigned radix, bool isNegative,
+                                   bool* haveParseError, gc::Heap heap) {
+  static_assert(
+      std::is_same_v<CharT, JS::Latin1Char> || std::is_same_v<CharT, char16_t>,
+      "only the bare minimum character types are supported, to avoid "
+      "excessively instantiating this template");
+
+  MOZ_ASSERT(chars.length());
+
+  RangedPtr<const CharT> start = chars.begin();
+  RangedPtr<const CharT> end = chars.end();
+
+  // Skipping leading zeroes.
+  while (start[0] == '0') {
+    start++;
+    if (start == end) {
+      return zero(cx, heap);
+    }
+  }
+
+  unsigned limit0 = '0' + std::min(radix, 10u);
+  unsigned limita = 'a' + (radix - 10);
+  unsigned limitA = 'A' + (radix - 10);
+
+  size_t length;
+  if (!calculateMaximumDigitsRequired(cx, radix, end - start, &length)) {
+    return nullptr;
+  }
+  BigInt* result = createUninitialized(cx, length, isNegative, heap);
+  if (!result) {
+    return nullptr;
+  }
+
+  result->initializeDigitsToZero();
+
+  for (; start < end; start++) {
+    uint32_t digit;
+    CharT c = *start;
+    if (c >= '0' && c < limit0) {
+      digit = c - '0';
+    } else if (c >= 'a' && c < limita) {
+      digit = c - 'a' + 10;
+    } else if (c >= 'A' && c < limitA) {
+      digit = c - 'A' + 10;
+    } else {
+      *haveParseError = true;
+      return nullptr;
+    }
+
+    result->inplaceMultiplyAdd(static_cast<Digit>(radix),
+                               static_cast<Digit>(digit));
+  }
+
+  return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 7.2
+template <typename CharT>
+BigInt* BigInt::parseLiteral(JSContext* cx, const Range<const CharT> chars,
+                             bool* haveParseError, js::gc::Heap heap) {
+  RangedPtr<const CharT> start = chars.begin();
+  const RangedPtr<const CharT> end = chars.end();
+  bool isNegative = false;
+
+  MOZ_ASSERT(chars.length());
+
+  if (end - start > 2 && start[0] == '0') {
+    if (start[1] == 'b' || start[1] == 'B') {
+      // StringNumericLiteral ::: BinaryIntegerLiteral
+      return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 2,
+                                isNegative, haveParseError, heap);
+    }
+    if (start[1] == 'x' || start[1] == 'X') {
+      // StringNumericLiteral ::: HexIntegerLiteral
+      return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 16,
+                                isNegative, haveParseError, heap);
+    }
+    if (start[1] == 'o' || start[1] == 'O') {
+      // StringNumericLiteral ::: OctalIntegerLiteral
+      return parseLiteralDigits(cx, Range<const CharT>(start + 2, end), 8,
+                                isNegative, haveParseError, heap);
+    }
+  }
+
+  return parseLiteralDigits(cx, Range<const CharT>(start, end), 10, isNegative,
+                            haveParseError, heap);
+}
+
+// trim and remove radix selection prefix.
+template <typename CharT>
+bool BigInt::literalIsZero(const Range<const CharT> chars) {
+  RangedPtr<const CharT> start = chars.begin();
+  const RangedPtr<const CharT> end = chars.end();
+
+  MOZ_ASSERT(chars.length());
+
+  // Skip over radix selector.
+  if (end - start > 2 && start[0] == '0') {
+    if (start[1] == 'b' || start[1] == 'B' || start[1] == 'x' ||
+        start[1] == 'X' || start[1] == 'o' || start[1] == 'O') {
+      start += 2;
+    }
+  }
+
+  // Skipping leading zeroes.
+  while (start[0] == '0') {
+    start++;
+    if (start == end) {
+      return true;
+    }
+  }
+
+  return false;
+}
+
+template bool BigInt::literalIsZero(const Range<const char16_t> chars);
+
+BigInt* BigInt::createFromDouble(JSContext* cx, double d) {
+  MOZ_ASSERT(IsInteger(d), "Only integer-valued doubles can convert to BigInt");
+
+  if (d == 0) {
+    return zero(cx);
+  }
+
+  int exponent = mozilla::ExponentComponent(d);
+  MOZ_ASSERT(exponent >= 0);
+  int length = exponent / DigitBits + 1;
+  BigInt* result = createUninitialized(cx, length, d < 0);
+  if (!result) {
+    return nullptr;
+  }
+
+  // We construct a BigInt from the double `d` by shifting its mantissa
+  // according to its exponent and mapping the bit pattern onto digits.
+  //
+  //               <----------- bitlength = exponent + 1 ----------->
+  //                <----- 52 ------> <------ trailing zeroes ------>
+  //   mantissa:     1yyyyyyyyyyyyyyyyy 0000000000000000000000000000000
+  //   digits:    0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx
+  //                        <-->          <------>
+  //          msdTopBit     DigitBits
+  //
+  using Double = mozilla::FloatingPoint<double>;
+  uint64_t mantissa =
+      mozilla::BitwiseCast<uint64_t>(d) & Double::kSignificandBits;
+  // Add implicit high bit.
+  mantissa |= 1ull << Double::kSignificandWidth;
+
+  const int mantissaTopBit = Double::kSignificandWidth;  // 0-indexed.
+
+  // 0-indexed position of `d`'s most significant bit within the `msd`.
+  int msdTopBit = exponent % DigitBits;
+
+  // Next digit under construction.
+  Digit digit;
+
+  // First, build the MSD by shifting the mantissa appropriately.
+ if (msdTopBit < mantissaTopBit) { + int remainingMantissaBits = mantissaTopBit - msdTopBit; + digit = mantissa >> remainingMantissaBits; + mantissa = mantissa << (64 - remainingMantissaBits); + } else { + MOZ_ASSERT(msdTopBit >= mantissaTopBit); + digit = mantissa << (msdTopBit - mantissaTopBit); + mantissa = 0; + } + MOZ_ASSERT(digit != 0, "most significant digit should not be zero"); + result->setDigit(--length, digit); + + // Fill in digits containing mantissa contributions. + while (mantissa) { + MOZ_ASSERT(length > 0, + "double bits were all non-fractional, so there must be " + "digits present to hold them"); + + if (DigitBits == 64) { + result->setDigit(--length, mantissa); + break; + } + + MOZ_ASSERT(DigitBits == 32); + Digit current = mantissa >> 32; + mantissa = mantissa << 32; + result->setDigit(--length, current); + } + + // Fill in low-order zeroes. + for (int i = length - 1; i >= 0; i--) { + result->setDigit(i, 0); + } + + return result; +} + +BigInt* BigInt::createFromUint64(JSContext* cx, uint64_t n) { + if (n == 0) { + return zero(cx); + } + + const bool isNegative = false; + + if (DigitBits == 32) { + Digit low = n; + Digit high = n >> 32; + size_t length = high ? 2 : 1; + + BigInt* res = createUninitialized(cx, length, isNegative); + if (!res) { + return nullptr; + } + res->setDigit(0, low); + if (high) { + res->setDigit(1, high); + } + return res; + } + + return createFromDigit(cx, n, isNegative); +} + +BigInt* BigInt::createFromInt64(JSContext* cx, int64_t n) { + BigInt* res = createFromUint64(cx, Abs(n)); + if (!res) { + return nullptr; + } + + if (n < 0) { + res->setHeaderFlagBit(SignBit); + } + MOZ_ASSERT(res->isNegative() == (n < 0)); + + return res; +} + +// BigInt proposal section 5.1.2 +BigInt* js::NumberToBigInt(JSContext* cx, double d) { + // Step 1 is an assertion checked by the caller. + // Step 2. 
+ if (!IsInteger(d)) { + ToCStringBuf cbuf; + const char* str = NumberToCString(&cbuf, d); + MOZ_ASSERT(str); + + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_NONINTEGER_NUMBER_TO_BIGINT, str); + return nullptr; + } + + // Step 3. + return BigInt::createFromDouble(cx, d); +} + +BigInt* BigInt::copy(JSContext* cx, HandleBigInt x, gc::Heap heap) { + if (x->isZero()) { + return zero(cx, heap); + } + + BigInt* result = + createUninitialized(cx, x->digitLength(), x->isNegative(), heap); + if (!result) { + return nullptr; + } + for (size_t i = 0; i < x->digitLength(); i++) { + result->setDigit(i, x->digit(i)); + } + return result; +} + +// BigInt proposal section 1.1.7 +BigInt* BigInt::add(JSContext* cx, HandleBigInt x, HandleBigInt y) { + bool xNegative = x->isNegative(); + + // x + y == x + y + // -x + -y == -(x + y) + if (xNegative == y->isNegative()) { + return absoluteAdd(cx, x, y, xNegative); + } + + // x + -y == x - y == -(y - x) + // -x + y == y - x == -(x - y) + int8_t compare = absoluteCompare(x, y); + if (compare == 0) { + return zero(cx); + } + + if (compare > 0) { + return absoluteSub(cx, x, y, xNegative); + } + + return absoluteSub(cx, y, x, !xNegative); +} + +// BigInt proposal section 1.1.8 +BigInt* BigInt::sub(JSContext* cx, HandleBigInt x, HandleBigInt y) { + bool xNegative = x->isNegative(); + if (xNegative != y->isNegative()) { + // x - (-y) == x + y + // (-x) - y == -(x + y) + return absoluteAdd(cx, x, y, xNegative); + } + + // x - y == -(y - x) + // (-x) - (-y) == y - x == -(x - y) + int8_t compare = absoluteCompare(x, y); + if (compare == 0) { + return zero(cx); + } + + if (compare > 0) { + return absoluteSub(cx, x, y, xNegative); + } + + return absoluteSub(cx, y, x, !xNegative); +} + +// BigInt proposal section 1.1.4 +BigInt* BigInt::mul(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return x; + } + if (y->isZero()) { + return y; + } + + bool resultNegative = x->isNegative() != y->isNegative(); + + // 
Fast path for the likely-common case of up to a uint64_t of magnitude.
+  if (x->absFitsInUint64() && y->absFitsInUint64()) {
+    uint64_t lhs = x->uint64FromAbsNonZero();
+    uint64_t rhs = y->uint64FromAbsNonZero();
+
+    uint64_t res;
+    if (js::SafeMul(lhs, rhs, &res)) {
+      MOZ_ASSERT(res != 0);
+      return createFromNonZeroRawUint64(cx, res, resultNegative);
+    }
+  }
+
+  unsigned resultLength = x->digitLength() + y->digitLength();
+  BigInt* result = createUninitialized(cx, resultLength, resultNegative);
+  if (!result) {
+    return nullptr;
+  }
+  result->initializeDigitsToZero();
+
+  for (size_t i = 0; i < x->digitLength(); i++) {
+    multiplyAccumulate(y, x->digit(i), result, i);
+  }
+
+  return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 1.1.5
+BigInt* BigInt::div(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+  // 1. If y is 0n, throw a RangeError exception.
+  if (y->isZero()) {
+    JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
+                              JSMSG_BIGINT_DIVISION_BY_ZERO);
+    return nullptr;
+  }
+
+  // 2. Let quotient be the mathematical value of x divided by y.
+  // 3. Return a BigInt representing quotient rounded towards 0 to the next
+  //    integral value.
+  if (x->isZero()) {
+    return x;
+  }
+
+  if (absoluteCompare(x, y) < 0) {
+    return zero(cx);
+  }
+
+  RootedBigInt quotient(cx);
+  bool resultNegative = x->isNegative() != y->isNegative();
+  if (y->digitLength() == 1) {
+    Digit divisor = y->digit(0);
+    if (divisor == 1) {
+      return resultNegative == x->isNegative() ? x : neg(cx, x);
+    }
+
+    Digit remainder;
+    if (!absoluteDivWithDigitDivisor(cx, x, divisor, Some(&quotient),
+                                     &remainder, resultNegative)) {
+      return nullptr;
+    }
+  } else {
+    if (!absoluteDivWithBigIntDivisor(cx, x, y, Some(&quotient), Nothing(),
+                                      resultNegative)) {
+      return nullptr;
+    }
+  }
+
+  return destructivelyTrimHighZeroDigits(cx, quotient);
+}
+
+// BigInt proposal section 1.1.6
+BigInt* BigInt::mod(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+  // 1.
If y is 0n, throw a RangeError exception. + if (y->isZero()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_DIVISION_BY_ZERO); + return nullptr; + } + + // 2. If x is 0n, return x. + if (x->isZero()) { + return x; + } + // 3. Let r be the BigInt defined by the mathematical relation r = x - (y × + // q) where q is a BigInt that is negative only if x/y is negative and + // positive only if x/y is positive, and whose magnitude is as large as + // possible without exceeding the magnitude of the true mathematical + // quotient of x and y. + if (absoluteCompare(x, y) < 0) { + return x; + } + + if (y->digitLength() == 1) { + Digit divisor = y->digit(0); + if (divisor == 1) { + return zero(cx); + } + + Digit remainderDigit; + bool unusedQuotientNegative = false; + if (!absoluteDivWithDigitDivisor(cx, x, divisor, Nothing(), &remainderDigit, + unusedQuotientNegative)) { + MOZ_CRASH("BigInt div by digit failed unexpectedly"); + } + + if (!remainderDigit) { + return zero(cx); + } + + return createFromDigit(cx, remainderDigit, x->isNegative()); + } else { + RootedBigInt remainder(cx); + if (!absoluteDivWithBigIntDivisor(cx, x, y, Nothing(), Some(&remainder), + x->isNegative())) { + return nullptr; + } + MOZ_ASSERT(remainder); + return destructivelyTrimHighZeroDigits(cx, remainder); + } +} + +// BigInt proposal section 1.1.3 +BigInt* BigInt::pow(JSContext* cx, HandleBigInt x, HandleBigInt y) { + // 1. If exponent is < 0, throw a RangeError exception. + if (y->isNegative()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_NEGATIVE_EXPONENT); + return nullptr; + } + + // 2. If base is 0n and exponent is 0n, return 1n. + if (y->isZero()) { + return one(cx); + } + + if (x->isZero()) { + return x; + } + + // 3. Return a BigInt representing the mathematical value of base raised + // to the power exponent. + if (x->digitLength() == 1 && x->digit(0) == 1) { + // (-1) ** even_number == 1. 
+    if (x->isNegative() && (y->digit(0) & 1) == 0) {
+      return neg(cx, x);
+    }
+    // (-1) ** odd_number == -1; 1 ** anything == 1.
+    return x;
+  }
+
+  // For all bases >= 2, very large exponents would lead to unrepresentable
+  // results.
+  static_assert(MaxBitLength < std::numeric_limits<Digit>::max(),
+                "unexpectedly large MaxBitLength");
+  if (y->digitLength() > 1) {
+    ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+    return nullptr;
+  }
+  Digit exponent = y->digit(0);
+  if (exponent == 1) {
+    return x;
+  }
+  if (exponent >= MaxBitLength) {
+    ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE);
+    return nullptr;
+  }
+
+  static_assert(MaxBitLength <= std::numeric_limits<int>::max(),
+                "unexpectedly large MaxBitLength");
+  int n = static_cast<int>(exponent);
+  bool isOddPower = n & 1;
+
+  if (x->digitLength() == 1 && mozilla::IsPowerOfTwo(x->digit(0))) {
+    // Fast path for (2^m)^n.
+
+    // Result is negative for odd powers.
+    bool resultNegative = x->isNegative() && isOddPower;
+
+    unsigned m = mozilla::FloorLog2(x->digit(0));
+    MOZ_ASSERT(m < DigitBits);
+
+    static_assert(MaxBitLength * DigitBits > MaxBitLength,
+                  "n * m can't overflow");
+    n *= int(m);
+
+    int length = 1 + (n / DigitBits);
+    BigInt* result = createUninitialized(cx, length, resultNegative);
+    if (!result) {
+      return nullptr;
+    }
+    result->initializeDigitsToZero();
+    result->setDigit(length - 1, static_cast<Digit>(1) << (n % DigitBits));
+    return result;
+  }
+
+  RootedBigInt runningSquare(cx, x);
+  RootedBigInt result(cx, isOddPower ? x : nullptr);
+  n /= 2;
+
+  // Fast path for the likely-common case of up to a uint64_t of magnitude.
+  if (x->absFitsInUint64()) {
+    bool resultNegative = x->isNegative() && isOddPower;
+
+    uint64_t runningSquareInt = x->uint64FromAbsNonZero();
+    uint64_t resultInt = isOddPower ?
runningSquareInt : 1; + while (true) { + uint64_t runningSquareStart = runningSquareInt; + uint64_t r; + if (!js::SafeMul(runningSquareInt, runningSquareInt, &r)) { + break; + } + runningSquareInt = r; + + if (n & 1) { + if (!js::SafeMul(resultInt, runningSquareInt, &r)) { + // Recover |runningSquare| before we restart the loop. + runningSquareInt = runningSquareStart; + break; + } + resultInt = r; + } + + n /= 2; + if (n == 0) { + return createFromNonZeroRawUint64(cx, resultInt, resultNegative); + } + } + + runningSquare = createFromNonZeroRawUint64(cx, runningSquareInt, false); + if (!runningSquare) { + return nullptr; + } + + result = createFromNonZeroRawUint64(cx, resultInt, resultNegative); + if (!result) { + return nullptr; + } + } + + // This implicitly sets the result's sign correctly. + while (true) { + runningSquare = mul(cx, runningSquare, runningSquare); + if (!runningSquare) { + return nullptr; + } + + if (n & 1) { + if (!result) { + result = runningSquare; + } else { + result = mul(cx, result, runningSquare); + if (!result) { + return nullptr; + } + } + } + + n /= 2; + if (n == 0) { + return result; + } + } +} + +BigInt* BigInt::lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero() || y->isZero()) { + return x; + } + + if (y->digitLength() > 1 || y->digit(0) > MaxBitLength) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + if (js::SupportDifferentialTesting()) { + fprintf(stderr, "ReportOutOfMemory called\n"); + } + return nullptr; + } + Digit shift = y->digit(0); + int digitShift = static_cast(shift / DigitBits); + int bitsShift = static_cast(shift % DigitBits); + int length = x->digitLength(); + bool grow = bitsShift && (x->digit(length - 1) >> (DigitBits - bitsShift)); + int resultLength = length + digitShift + grow; + BigInt* result = createUninitialized(cx, resultLength, x->isNegative()); + if (!result) { + return nullptr; + } + + int i = 0; + for (; i < digitShift; i++) { + 
result->setDigit(i, 0);
+  }
+
+  if (bitsShift == 0) {
+    for (int j = 0; i < resultLength; i++, j++) {
+      result->setDigit(i, x->digit(j));
+    }
+  } else {
+    Digit carry = 0;
+    for (int j = 0; j < length; i++, j++) {
+      Digit d = x->digit(j);
+      result->setDigit(i, (d << bitsShift) | carry);
+      carry = d >> (DigitBits - bitsShift);
+    }
+    if (grow) {
+      result->setDigit(i, carry);
+    } else {
+      MOZ_ASSERT(!carry);
+    }
+  }
+  return result;
+}
+
+BigInt* BigInt::rshByMaximum(JSContext* cx, bool isNegative) {
+  return isNegative ? negativeOne(cx) : zero(cx);
+}
+
+BigInt* BigInt::rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+  if (x->isZero() || y->isZero()) {
+    return x;
+  }
+
+  if (y->digitLength() > 1 || y->digit(0) >= MaxBitLength) {
+    return rshByMaximum(cx, x->isNegative());
+  }
+  Digit shift = y->digit(0);
+  int length = x->digitLength();
+  int digitShift = static_cast<int>(shift / DigitBits);
+  int bitsShift = static_cast<int>(shift % DigitBits);
+  int resultLength = length - digitShift;
+  if (resultLength <= 0) {
+    return rshByMaximum(cx, x->isNegative());
+  }
+  // For negative numbers, round down if any bit was shifted out (so that e.g.
+  // -5n >> 1n == -3n and not -2n). Check now whether this will happen and
+  // whether it can cause overflow into a new digit. If we allocate the result
+  // large enough up front, it avoids having to do a second allocation later.
+  bool mustRoundDown = false;
+  if (x->isNegative()) {
+    const Digit mask = (static_cast<Digit>(1) << bitsShift) - 1;
+    if ((x->digit(digitShift) & mask)) {
+      mustRoundDown = true;
+    } else {
+      for (int i = 0; i < digitShift; i++) {
+        if (x->digit(i)) {
+          mustRoundDown = true;
+          break;
+        }
+      }
+    }
+  }
+  // If bits_shift is non-zero, it frees up bits, preventing overflow.
+  if (mustRoundDown && bitsShift == 0) {
+    // Overflow cannot happen if the most significant digit has unset bits.
+    Digit msd = x->digit(length - 1);
+    bool roundingCanOverflow = msd == std::numeric_limits<Digit>::max();
+    if (roundingCanOverflow) {
+      resultLength++;
+    }
+  }
+
+  MOZ_ASSERT(resultLength <= length);
+  RootedBigInt result(cx,
+                      createUninitialized(cx, resultLength, x->isNegative()));
+  if (!result) {
+    return nullptr;
+  }
+  if (!bitsShift) {
+    // If roundingCanOverflow, manually initialize the overflow digit.
+    result->setDigit(resultLength - 1, 0);
+    for (int i = digitShift; i < length; i++) {
+      result->setDigit(i - digitShift, x->digit(i));
+    }
+  } else {
+    Digit carry = x->digit(digitShift) >> bitsShift;
+    int last = length - digitShift - 1;
+    for (int i = 0; i < last; i++) {
+      Digit d = x->digit(i + digitShift + 1);
+      result->setDigit(i, (d << (DigitBits - bitsShift)) | carry);
+      carry = d >> bitsShift;
+    }
+    result->setDigit(last, carry);
+  }
+
+  if (mustRoundDown) {
+    MOZ_ASSERT(x->isNegative());
+    // Since the result is negative, rounding down means adding one to
+    // its absolute value. This cannot overflow. TODO: modify the result in
+    // place.
+    return absoluteAddOne(cx, result, x->isNegative());
+  }
+  return destructivelyTrimHighZeroDigits(cx, result);
+}
+
+// BigInt proposal section 1.1.9. BigInt::leftShift ( x, y )
+BigInt* BigInt::lsh(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+  if (y->isNegative()) {
+    return rshByAbsolute(cx, x, y);
+  }
+  return lshByAbsolute(cx, x, y);
+}
+
+// BigInt proposal section 1.1.10. BigInt::signedRightShift ( x, y )
+BigInt* BigInt::rsh(JSContext* cx, HandleBigInt x, HandleBigInt y) {
+  if (y->isNegative()) {
+    return lshByAbsolute(cx, x, y);
+  }
+  return rshByAbsolute(cx, x, y);
+}
+
+// BigInt proposal section 1.1.17.
BigInt::bitwiseAND ( x, y ) +BigInt* BigInt::bitAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return x; + } + + if (y->isZero()) { + return y; + } + + if (!x->isNegative() && !y->isNegative()) { + return absoluteAnd(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1)) + // == -(((x-1) | (y-1)) + 1) + RootedBigInt x1(cx, absoluteSubOne(cx, x)); + if (!x1) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + RootedBigInt result(cx, absoluteOr(cx, x1, y1)); + if (!result) { + return nullptr; + } + bool resultNegative = true; + return absoluteAddOne(cx, result, resultNegative); + } + + MOZ_ASSERT(x->isNegative() != y->isNegative()); + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? x : y; + + RootedBigInt neg1(cx, absoluteSubOne(cx, neg)); + if (!neg1) { + return nullptr; + } + + // x & (-y) == x & ~(y-1) == x & ~(y-1) + return absoluteAndNot(cx, pos, neg1); +} + +// BigInt proposal section 1.1.18. BigInt::bitwiseXOR ( x, y ) +BigInt* BigInt::bitXor(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return y; + } + + if (y->isZero()) { + return x; + } + + if (!x->isNegative() && !y->isNegative()) { + return absoluteXor(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1) + RootedBigInt x1(cx, absoluteSubOne(cx, x)); + if (!x1) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + return absoluteXor(cx, x1, y1); + } + MOZ_ASSERT(x->isNegative() != y->isNegative()); + + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? 
x : y; + + // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, neg)); + if (!result) { + return nullptr; + } + result = absoluteXor(cx, result, pos); + if (!result) { + return nullptr; + } + bool resultNegative = true; + return absoluteAddOne(cx, result, resultNegative); +} + +// BigInt proposal section 1.1.19. BigInt::bitwiseOR ( x, y ) +BigInt* BigInt::bitOr(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return y; + } + + if (y->isZero()) { + return x; + } + + bool resultNegative = x->isNegative() || y->isNegative(); + + if (!resultNegative) { + return absoluteOr(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1)) + // == -(((x-1) & (y-1)) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, x)); + if (!result) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + result = absoluteAnd(cx, result, y1); + if (!result) { + return nullptr; + } + return absoluteAddOne(cx, result, resultNegative); + } + + MOZ_ASSERT(x->isNegative() != y->isNegative()); + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? x : y; + + // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, neg)); + if (!result) { + return nullptr; + } + result = absoluteAndNot(cx, result, pos); + if (!result) { + return nullptr; + } + return absoluteAddOne(cx, result, resultNegative); +} + +// BigInt proposal section 1.1.2. 
BigInt::bitwiseNOT ( x )
+BigInt* BigInt::bitNot(JSContext* cx, HandleBigInt x) {
+  if (x->isNegative()) {
+    // ~(-x) == ~(~(x-1)) == x-1
+    return absoluteSubOne(cx, x);
+  } else {
+    // ~x == -x-1 == -(x+1)
+    bool resultNegative = true;
+    return absoluteAddOne(cx, x, resultNegative);
+  }
+}
+
+int64_t BigInt::toInt64(const BigInt* x) { return WrapToSigned(toUint64(x)); }
+
+uint64_t BigInt::toUint64(const BigInt* x) {
+  if (x->isZero()) {
+    return 0;
+  }
+
+  uint64_t digit = x->uint64FromAbsNonZero();
+
+  // Return the two's complement if x is negative.
+  if (x->isNegative()) {
+    return ~(digit - 1);
+  }
+
+  return digit;
+}
+
+bool BigInt::isInt64(BigInt* x, int64_t* result) {
+  MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+  if (!x->absFitsInUint64()) {
+    return false;
+  }
+
+  if (x->isZero()) {
+    *result = 0;
+    return true;
+  }
+
+  uint64_t magnitude = x->uint64FromAbsNonZero();
+
+  if (x->isNegative()) {
+    constexpr uint64_t Int64MinMagnitude = uint64_t(1) << 63;
+    if (magnitude <= Int64MinMagnitude) {
+      *result = magnitude == Int64MinMagnitude
+                    ? std::numeric_limits<int64_t>::min()
+                    : -AssertedCast<int64_t>(magnitude);
+      return true;
+    }
+  } else {
+    if (magnitude <=
+        static_cast<uint64_t>(std::numeric_limits<int64_t>::max())) {
+      *result = AssertedCast<int64_t>(magnitude);
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool BigInt::isUint64(BigInt* x, uint64_t* result) {
+  MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+  if (!x->absFitsInUint64() || x->isNegative()) {
+    return false;
+  }
+
+  if (x->isZero()) {
+    *result = 0;
+    return true;
+  }
+
+  *result = x->uint64FromAbsNonZero();
+  return true;
+}
+
+bool BigInt::isNumber(BigInt* x, double* result) {
+  MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result));
+
+  if (!x->absFitsInUint64()) {
+    return false;
+  }
+
+  if (x->isZero()) {
+    *result = 0;
+    return true;
+  }
+
+  uint64_t magnitude = x->uint64FromAbsNonZero();
+  if (magnitude < uint64_t(DOUBLE_INTEGRAL_PRECISION_LIMIT)) {
+    *result = x->isNegative() ?
-double(magnitude) : double(magnitude); + return true; + } + + return false; +} + +// Compute `2**bits - (x & (2**bits - 1))`. Used when treating BigInt values as +// arbitrary-precision two's complement signed integers. +BigInt* BigInt::truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x, + uint64_t bits, + bool resultNegative) { + MOZ_ASSERT(bits != 0); + MOZ_ASSERT(!x->isZero()); + + if (bits > MaxBitLength) { + ReportOversizedAllocation(cx, JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + + size_t resultLength = CeilDiv(bits, DigitBits); + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + + // Process all digits except the MSD. + size_t xLength = x->digitLength(); + Digit borrow = 0; + // Take digits from `x` until its length is exhausted. + for (size_t i = 0; i < std::min(resultLength - 1, xLength); i++) { + Digit newBorrow = 0; + Digit difference = digitSub(0, x->digit(i), &newBorrow); + difference = digitSub(difference, borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + // Then simulate leading zeroes in `x` as needed. + for (size_t i = xLength; i < resultLength - 1; i++) { + Digit newBorrow = 0; + Digit difference = digitSub(0, borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + + // The MSD might contain extra bits that we don't want. + Digit xMSD = resultLength <= xLength ? 
x->digit(resultLength - 1) : 0; + Digit resultMSD; + if (bits % DigitBits == 0) { + Digit newBorrow = 0; + resultMSD = digitSub(0, xMSD, &newBorrow); + resultMSD = digitSub(resultMSD, borrow, &newBorrow); + } else { + size_t drop = DigitBits - (bits % DigitBits); + xMSD = (xMSD << drop) >> drop; + Digit minuendMSD = Digit(1) << (DigitBits - drop); + Digit newBorrow = 0; + resultMSD = digitSub(minuendMSD, xMSD, &newBorrow); + resultMSD = digitSub(resultMSD, borrow, &newBorrow); + MOZ_ASSERT(newBorrow == 0, "result < 2^bits"); + // If all subtracted bits were zero, we have to get rid of the + // materialized minuendMSD again. + resultMSD &= (minuendMSD - 1); + } + result->setDigit(resultLength - 1, resultMSD); + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::asUintN(JSContext* cx, HandleBigInt x, uint64_t bits) { + if (x->isZero()) { + return x; + } + + if (bits == 0) { + return zero(cx); + } + + // When truncating a negative number, simulate two's complement. + if (x->isNegative()) { + bool resultNegative = false; + return truncateAndSubFromPowerOfTwo(cx, x, bits, resultNegative); + } + + if (bits <= 64) { + uint64_t u64 = toUint64(x); + uint64_t mask = uint64_t(-1) >> (64 - bits); + uint64_t n = u64 & mask; + if (u64 == n && x->absFitsInUint64()) { + return x; + } + return createFromUint64(cx, n); + } + + if (bits >= MaxBitLength) { + return x; + } + + Digit msd = x->digit(x->digitLength() - 1); + size_t msdBits = DigitBits - DigitLeadingZeroes(msd); + size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits; + + if (bits >= bitLength) { + return x; + } + + size_t length = CeilDiv(bits, DigitBits); + MOZ_ASSERT(length >= 2, "single-digit cases should be handled above"); + MOZ_ASSERT(length <= x->digitLength()); + + // Eagerly trim high zero digits. 
+ const size_t highDigitBits = ((bits - 1) % DigitBits) + 1; + const Digit highDigitMask = Digit(-1) >> (DigitBits - highDigitBits); + Digit mask = highDigitMask; + while (length > 0) { + if (x->digit(length - 1) & mask) { + break; + } + + mask = Digit(-1); + length--; + } + + const bool isNegative = false; + BigInt* res = createUninitialized(cx, length, isNegative); + if (res == nullptr) { + return nullptr; + } + + while (length-- > 0) { + res->setDigit(length, x->digit(length) & mask); + mask = Digit(-1); + } + MOZ_ASSERT_IF(length == 0, res->isZero()); + + return res; +} + +BigInt* BigInt::asIntN(JSContext* cx, HandleBigInt x, uint64_t bits) { + if (x->isZero()) { + return x; + } + + if (bits == 0) { + return zero(cx); + } + + if (bits == 64) { + int64_t n = toInt64(x); + if (((n < 0) == x->isNegative()) && x->absFitsInUint64()) { + return x; + } + return createFromInt64(cx, n); + } + + if (bits > MaxBitLength) { + return x; + } + + Digit msd = x->digit(x->digitLength() - 1); + size_t msdBits = DigitBits - DigitLeadingZeroes(msd); + size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits; + + if (bits > bitLength) { + return x; + } + + Digit signBit = Digit(1) << ((bits - 1) % DigitBits); + if (bits == bitLength && msd < signBit) { + return x; + } + + // All the cases above were the trivial cases: truncating zero, or to zero + // bits, or to more bits than are in `x` (so we return `x` directly), or we + // already have the 64-bit fast path. If we get here, follow the textbook + // algorithm from the specification. + + // BigInt.asIntN step 3: Let `mod` be `x` modulo `2**bits`. + RootedBigInt mod(cx, asUintN(cx, x, bits)); + if (!mod) { + return nullptr; + } + + // Step 4: If `mod >= 2**(bits - 1)`, return `mod - 2**bits`; otherwise, + // return `mod`. 
+ if (mod->digitLength() == CeilDiv(bits, DigitBits)) { + MOZ_ASSERT(!mod->isZero(), + "nonzero bits implies nonzero digit length which implies " + "nonzero overall"); + + if ((mod->digit(mod->digitLength() - 1) & signBit) != 0) { + bool resultNegative = true; + return truncateAndSubFromPowerOfTwo(cx, mod, bits, resultNegative); + } + } + + return mod; +} + +static bool ValidBigIntOperands(JSContext* cx, HandleValue lhs, + HandleValue rhs) { + MOZ_ASSERT(lhs.isBigInt() || rhs.isBigInt()); + + if (!lhs.isBigInt() || !rhs.isBigInt()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TO_NUMBER); + return false; + } + + return true; +} + +bool BigInt::addValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::add(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::subValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::sub(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::mulValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::mul(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::divValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if 
(!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::div(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::modValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::mod(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::powValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::pow(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::negValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::neg(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::incValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::inc(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::decValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::dec(cx, operandBigInt); + if 
(!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::lshValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::lsh(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::rshValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::rsh(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitAndValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::bitAnd(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitXorValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::bitXor(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitOrValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* 
resBigInt = BigInt::bitOr(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitNotValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::bitNot(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +// BigInt proposal section 7.3 +BigInt* js::ToBigInt(JSContext* cx, HandleValue val) { + RootedValue v(cx, val); + + // Step 1. + if (!ToPrimitive(cx, JSTYPE_NUMBER, &v)) { + return nullptr; + } + + // Step 2. + if (v.isBigInt()) { + return v.toBigInt(); + } + + if (v.isBoolean()) { + return v.toBoolean() ? BigInt::one(cx) : BigInt::zero(cx); + } + + if (v.isString()) { + RootedString str(cx, v.toString()); + BigInt* bi; + JS_TRY_VAR_OR_RETURN_NULL(cx, bi, StringToBigInt(cx, str)); + if (!bi) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + return nullptr; + } + return bi; + } + + ReportValueError(cx, JSMSG_CANT_CONVERT_TO, JSDVG_IGNORE_STACK, v, nullptr, + "BigInt"); + return nullptr; +} + +JS::Result js::ToBigInt64(JSContext* cx, HandleValue v) { + BigInt* bi = js::ToBigInt(cx, v); + if (!bi) { + return cx->alreadyReportedError(); + } + return BigInt::toInt64(bi); +} + +JS::Result js::ToBigUint64(JSContext* cx, HandleValue v) { + BigInt* bi = js::ToBigInt(cx, v); + if (!bi) { + return cx->alreadyReportedError(); + } + return BigInt::toUint64(bi); +} + +double BigInt::numberValue(BigInt* x) { + if (x->isZero()) { + return 0.0; + } + + using Double = mozilla::FloatingPoint; + constexpr uint8_t ExponentShift = Double::kExponentShift; + constexpr uint8_t SignificandWidth = Double::kSignificandWidth; + constexpr unsigned ExponentBias = Double::kExponentBias; + constexpr uint8_t SignShift = Double::kExponentWidth + SignificandWidth; + + MOZ_ASSERT(x->digitLength() > 
0); + + // Fast path for the likely-common case of up to a uint64_t of magnitude not + // exceeding integral precision in IEEE-754. (Note that we *depend* on this + // optimization being performed further down.) + if (x->absFitsInUint64()) { + uint64_t magnitude = x->uint64FromAbsNonZero(); + const uint64_t MaxIntegralPrecisionDouble = uint64_t(1) + << (SignificandWidth + 1); + if (magnitude <= MaxIntegralPrecisionDouble) { + return x->isNegative() ? -double(magnitude) : +double(magnitude); + } + } + + size_t length = x->digitLength(); + Digit msd = x->digit(length - 1); + uint8_t msdLeadingZeroes = DigitLeadingZeroes(msd); + + // `2**ExponentBias` is the largest power of two in a finite IEEE-754 + // double. If this bigint has a greater power of two, it'll round to + // infinity. + uint64_t exponent = length * DigitBits - msdLeadingZeroes - 1; + if (exponent > ExponentBias) { + return x->isNegative() ? mozilla::NegativeInfinity() + : mozilla::PositiveInfinity(); + } + + // Otherwise munge the most significant bits of the number into proper + // position in an IEEE-754 double and go to town. + + // Omit the most significant bit: the IEEE-754 format includes this bit + // implicitly for all double-precision integers. + const uint8_t msdIgnoredBits = msdLeadingZeroes + 1; + const uint8_t msdIncludedBits = DigitBits - msdIgnoredBits; + + // We compute the final mantissa of the result, shifted upward to the top of + // the `uint64_t` space -- plus an extra bit to detect potential rounding. + constexpr uint8_t BitsNeededForShiftedMantissa = SignificandWidth + 1; + + // Shift `msd`'s contributed bits upward to remove high-order zeroes and the + // highest set bit (which is implicit in IEEE-754 integral values so must be + // removed) and to add low-order zeroes. (Lower-order garbage bits are + // discarded when `shiftedMantissa` is converted to a real mantissa.) + uint64_t shiftedMantissa = + msdIncludedBits == 0 ? 
0 : uint64_t(msd) << (64 - msdIncludedBits); + + // If the extra bit is set, correctly rounding the result may require + // examining all lower-order bits. Also compute 1) the index of the Digit + // storing the extra bit, and 2) whether bits beneath the extra bit in that + // Digit are nonzero so we can round if needed. + size_t digitContainingExtraBit; + Digit bitsBeneathExtraBitInDigitContainingExtraBit; + + // Add shifted bits to `shiftedMantissa` until we have a complete mantissa and + // an extra bit. + if (msdIncludedBits >= BitsNeededForShiftedMantissa) { + // DigitBits=64 (necessarily for msdIncludedBits ≥ SignificandWidth+1; + // | C++ compiler range analysis ought eliminate this + // | check on 32-bit) + // _________|__________ + // / | + // msdIncludedBits + // ________|________ + // / | + // [001···················| + // \_/\_____________/\__| + // | | | + // msdIgnoredBits | bits below the extra bit (may be no bits) + // BitsNeededForShiftedMantissa=SignificandWidth+1 + digitContainingExtraBit = length - 1; + + const uint8_t countOfBitsInDigitBelowExtraBit = + DigitBits - BitsNeededForShiftedMantissa - msdIgnoredBits; + bitsBeneathExtraBitInDigitContainingExtraBit = + msd & ((Digit(1) << countOfBitsInDigitBelowExtraBit) - 1); + } else { + MOZ_ASSERT(length >= 2, + "single-Digit numbers with this few bits should have been " + "handled by the fast-path above"); + + Digit second = x->digit(length - 2); + if (DigitBits == 64) { + shiftedMantissa |= second >> msdIncludedBits; + + digitContainingExtraBit = length - 2; + + // msdIncludedBits + DigitBits + // ________|_________ + // / | + // DigitBits=64 + // msdIncludedBits | + // __|___ _____|___ + // / \ / | + // [001········|···········| + // \_/\_____________/\___| + // | | | + // msdIgnoredBits | bits below the extra bit (always more than one) + // | + // BitsNeededForShiftedMantissa=SignificandWidth+1 + const uint8_t countOfBitsInSecondDigitBelowExtraBit = + (msdIncludedBits + DigitBits) - 
BitsNeededForShiftedMantissa; + + bitsBeneathExtraBitInDigitContainingExtraBit = + second << (DigitBits - countOfBitsInSecondDigitBelowExtraBit); + } else { + shiftedMantissa |= uint64_t(second) << msdIgnoredBits; + + if (msdIncludedBits + DigitBits >= BitsNeededForShiftedMantissa) { + digitContainingExtraBit = length - 2; + + // msdIncludedBits + DigitBits + // ______|________ + // / | + // DigitBits=32 + // msdIncludedBits | + // _|_ _____|___ + // / \ / | + // [001·····|···········| + // \___________/\__| + // | | + // | bits below the extra bit (may be no bits) + // BitsNeededForShiftedMantissa=SignificandWidth+1 + const uint8_t countOfBitsInSecondDigitBelowExtraBit = + (msdIncludedBits + DigitBits) - BitsNeededForShiftedMantissa; + + bitsBeneathExtraBitInDigitContainingExtraBit = + second & ((Digit(1) << countOfBitsInSecondDigitBelowExtraBit) - 1); + } else { + MOZ_ASSERT(length >= 3, + "we must have at least three digits here, because " + "`msdIncludedBits + 32 < BitsNeededForShiftedMantissa` " + "guarantees `x < 2**53` -- and therefore the " + "MaxIntegralPrecisionDouble optimization above will have " + "handled two-digit cases"); + + Digit third = x->digit(length - 3); + shiftedMantissa |= uint64_t(third) >> msdIncludedBits; + + digitContainingExtraBit = length - 3; + + // msdIncludedBits + DigitBits + DigitBits + // ____________|______________ + // / | + // DigitBits=32 + // msdIncludedBits | DigitBits=32 + // _|_ _____|___ ____|____ + // / \ / \ / | + // [001·····|···········|···········| + // \____________________/\_____| + // | | + // | bits below the extra bit + // BitsNeededForShiftedMantissa=SignificandWidth+1 + static_assert(2 * DigitBits > BitsNeededForShiftedMantissa, + "two 32-bit digits should more than fill a mantissa"); + const uint8_t countOfBitsInThirdDigitBelowExtraBit = + msdIncludedBits + 2 * DigitBits - BitsNeededForShiftedMantissa; + + // Shift out the mantissa bits and the extra bit. 
+ bitsBeneathExtraBitInDigitContainingExtraBit = + third << (DigitBits - countOfBitsInThirdDigitBelowExtraBit); + } + } + } + + constexpr uint64_t LeastSignificantBit = uint64_t(1) + << (64 - SignificandWidth); + constexpr uint64_t ExtraBit = LeastSignificantBit >> 1; + + // The extra bit must be set for rounding to change the mantissa. + if ((shiftedMantissa & ExtraBit) != 0) { + bool shouldRoundUp; + if (shiftedMantissa & LeastSignificantBit) { + // If the lowest mantissa bit is set, it doesn't matter what lower bits + // are: nearest-even rounds up regardless. + shouldRoundUp = true; + } else { + // If the lowest mantissa bit is unset, *all* lower bits are relevant. + // All-zero bits below the extra bit situates `x` halfway between two + // values, and the nearest *even* value lies downward. But if any bit + // below the extra bit is set, `x` is closer to the rounded-up value. + shouldRoundUp = bitsBeneathExtraBitInDigitContainingExtraBit != 0; + if (!shouldRoundUp) { + while (digitContainingExtraBit-- > 0) { + if (x->digit(digitContainingExtraBit) != 0) { + shouldRoundUp = true; + break; + } + } + } + } + + if (shouldRoundUp) { + // Add one to the significand bits. If they overflow, the exponent must + // also be increased. If *that* overflows, return the correct infinity. + uint64_t before = shiftedMantissa; + shiftedMantissa += ExtraBit; + if (shiftedMantissa < before) { + exponent++; + if (exponent > ExponentBias) { + return x->isNegative() ? NegativeInfinity() + : PositiveInfinity(); + } + } + } + } + + uint64_t significandBits = shiftedMantissa >> (64 - SignificandWidth); + uint64_t signBit = uint64_t(x->isNegative() ? 1 : 0) << SignShift; + uint64_t exponentBits = (exponent + ExponentBias) << ExponentShift; + return mozilla::BitwiseCast(signBit | exponentBits | significandBits); +} + +int8_t BigInt::compare(BigInt* x, BigInt* y) { + // Sanity checks to catch negative zeroes escaping to the wild. 
+ MOZ_ASSERT(!x->isNegative() || !x->isZero()); + MOZ_ASSERT(!y->isNegative() || !y->isZero()); + + bool xSign = x->isNegative(); + + if (xSign != y->isNegative()) { + return xSign ? -1 : 1; + } + + if (xSign) { + std::swap(x, y); + } + + return absoluteCompare(x, y); +} + +bool BigInt::equal(BigInt* lhs, BigInt* rhs) { + if (lhs == rhs) { + return true; + } + if (lhs->digitLength() != rhs->digitLength()) { + return false; + } + if (lhs->isNegative() != rhs->isNegative()) { + return false; + } + for (size_t i = 0; i < lhs->digitLength(); i++) { + if (lhs->digit(i) != rhs->digit(i)) { + return false; + } + } + return true; +} + +int8_t BigInt::compare(BigInt* x, double y) { + MOZ_ASSERT(!std::isnan(y)); + + constexpr int LessThan = -1, Equal = 0, GreaterThan = 1; + + // ±Infinity exceeds a finite bigint value. + if (!std::isfinite(y)) { + return y > 0 ? LessThan : GreaterThan; + } + + // Handle `x === 0n` and `y == 0` special cases. + if (x->isZero()) { + if (y == 0) { + // -0 and +0 are treated identically. + return Equal; + } + + return y > 0 ? LessThan : GreaterThan; + } + + const bool xNegative = x->isNegative(); + if (y == 0) { + return xNegative ? LessThan : GreaterThan; + } + + // Nonzero `x` and `y` with different signs are trivially compared. + const bool yNegative = y < 0; + if (xNegative != yNegative) { + return xNegative ? LessThan : GreaterThan; + } + + // `x` and `y` are same-signed. Determine which has greater magnitude, + // then combine that with the signedness just computed to reach a result. + const int exponent = mozilla::ExponentComponent(y); + if (exponent < 0) { + // `y` is a nonzero fraction of magnitude less than 1. + return xNegative ? LessThan : GreaterThan; + } + + size_t xLength = x->digitLength(); + MOZ_ASSERT(xLength > 0); + + Digit xMSD = x->digit(xLength - 1); + const int shift = DigitLeadingZeroes(xMSD); + int xBitLength = xLength * DigitBits - shift; + + // Differing bit-length makes for a simple comparison. 
+ int yBitLength = exponent + 1; + if (xBitLength < yBitLength) { + return xNegative ? GreaterThan : LessThan; + } + if (xBitLength > yBitLength) { + return xNegative ? LessThan : GreaterThan; + } + + // Compare the high 64 bits of both numbers. (Lower-order bits not present + // in either number are zeroed.) Either that distinguishes `x` and `y`, or + // `x` and `y` differ only if a subsequent nonzero bit in `x` means `x` has + // larger magnitude. + + using Double = mozilla::FloatingPoint; + constexpr uint8_t SignificandWidth = Double::kSignificandWidth; + constexpr uint64_t SignificandBits = Double::kSignificandBits; + + const uint64_t doubleBits = mozilla::BitwiseCast(y); + const uint64_t significandBits = doubleBits & SignificandBits; + + // Readd the implicit-one bit when constructing `y`'s high 64 bits. + const uint64_t yHigh64Bits = + ((uint64_t(1) << SignificandWidth) | significandBits) + << (64 - SignificandWidth - 1); + + // Cons up `x`'s high 64 bits, backfilling zeroes for binary fractions of 1 + // if `x` doesn't have 64 bits. + uint8_t xBitsFilled = DigitBits - shift; + uint64_t xHigh64Bits = uint64_t(xMSD) << (64 - xBitsFilled); + + // At this point we no longer need to look at the most significant digit. + xLength--; + + // The high 64 bits from `x` will probably not align to a digit boundary. + // `xHasNonZeroLeftoverBits` will be set to true if any remaining + // least-significant bit from the digit holding xHigh64Bits's + // least-significant bit is nonzero. 
+ bool xHasNonZeroLeftoverBits = false; + + if (xBitsFilled < std::min(xBitLength, 64)) { + MOZ_ASSERT(xLength >= 1, + "If there are more bits to fill, there should be " + "more digits to fill them from"); + + Digit second = x->digit(--xLength); + if (DigitBits == 32) { + xBitsFilled += 32; + xHigh64Bits |= uint64_t(second) << (64 - xBitsFilled); + if (xBitsFilled < 64 && xLength >= 1) { + Digit third = x->digit(--xLength); + const uint8_t neededBits = 64 - xBitsFilled; + xHigh64Bits |= uint64_t(third) >> (DigitBits - neededBits); + xHasNonZeroLeftoverBits = (third << neededBits) != 0; + } + } else { + const uint8_t neededBits = 64 - xBitsFilled; + xHigh64Bits |= uint64_t(second) >> (DigitBits - neededBits); + xHasNonZeroLeftoverBits = (second << neededBits) != 0; + } + } + + // If high bits are unequal, the larger one has greater magnitude. + if (yHigh64Bits > xHigh64Bits) { + return xNegative ? GreaterThan : LessThan; + } + if (xHigh64Bits > yHigh64Bits) { + return xNegative ? LessThan : GreaterThan; + } + + // Otherwise the top 64 bits of both are equal. If the values differ, a + // lower-order bit in `x` is nonzero and `x` has greater magnitude than + // `y`; otherwise `x == y`. + if (xHasNonZeroLeftoverBits) { + return xNegative ? LessThan : GreaterThan; + } + while (xLength != 0) { + if (x->digit(--xLength) != 0) { + return xNegative ? LessThan : GreaterThan; + } + } + + return Equal; +} + +bool BigInt::equal(BigInt* lhs, double rhs) { + if (std::isnan(rhs)) { + return false; + } + return compare(lhs, rhs) == 0; +} + +JS::Result BigInt::equal(JSContext* cx, Handle lhs, + HandleString rhs) { + BigInt* rhsBigInt; + MOZ_TRY_VAR(rhsBigInt, StringToBigInt(cx, rhs)); + if (!rhsBigInt) { + return false; + } + return equal(lhs, rhsBigInt); +} + +// BigInt proposal section 3.2.5 +JS::Result BigInt::looselyEqual(JSContext* cx, HandleBigInt lhs, + HandleValue rhs) { + // Step 1. 
+ if (rhs.isBigInt()) { + return equal(lhs, rhs.toBigInt()); + } + + // Steps 2-5 (not applicable). + + // Steps 6-7. + if (rhs.isString()) { + RootedString rhsString(cx, rhs.toString()); + return equal(cx, lhs, rhsString); + } + + // Steps 8-9 (not applicable). + + // Steps 10-11. + if (rhs.isObject()) { + RootedValue rhsPrimitive(cx, rhs); + if (!ToPrimitive(cx, &rhsPrimitive)) { + return cx->alreadyReportedError(); + } + return looselyEqual(cx, lhs, rhsPrimitive); + } + + // Step 12. + if (rhs.isNumber()) { + return equal(lhs, rhs.toNumber()); + } + + // Step 13. + return false; +} + +// BigInt proposal section 1.1.12. BigInt::lessThan ( x, y ) +bool BigInt::lessThan(BigInt* x, BigInt* y) { return compare(x, y) < 0; } + +Maybe BigInt::lessThan(BigInt* lhs, double rhs) { + if (std::isnan(rhs)) { + return Maybe(Nothing()); + } + return Some(compare(lhs, rhs) < 0); +} + +Maybe BigInt::lessThan(double lhs, BigInt* rhs) { + if (std::isnan(lhs)) { + return Maybe(Nothing()); + } + return Some(-compare(rhs, lhs) < 0); +} + +bool BigInt::lessThan(JSContext* cx, HandleBigInt lhs, HandleString rhs, + Maybe& res) { + BigInt* rhsBigInt; + JS_TRY_VAR_OR_RETURN_FALSE(cx, rhsBigInt, StringToBigInt(cx, rhs)); + if (!rhsBigInt) { + res = Nothing(); + return true; + } + res = Some(lessThan(lhs, rhsBigInt)); + return true; +} + +bool BigInt::lessThan(JSContext* cx, HandleString lhs, HandleBigInt rhs, + Maybe& res) { + BigInt* lhsBigInt; + JS_TRY_VAR_OR_RETURN_FALSE(cx, lhsBigInt, StringToBigInt(cx, lhs)); + if (!lhsBigInt) { + res = Nothing(); + return true; + } + res = Some(lessThan(lhsBigInt, rhs)); + return true; +} + +bool BigInt::lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs, + Maybe& res) { + if (lhs.isBigInt()) { + if (rhs.isString()) { + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedString rhsString(cx, rhs.toString()); + return lessThan(cx, lhsBigInt, rhsString, res); + } + + if (rhs.isNumber()) { + res = lessThan(lhs.toBigInt(), rhs.toNumber()); + return 
true; + } + + MOZ_ASSERT(rhs.isBigInt()); + res = Some(lessThan(lhs.toBigInt(), rhs.toBigInt())); + return true; + } + + MOZ_ASSERT(rhs.isBigInt()); + if (lhs.isString()) { + RootedString lhsString(cx, lhs.toString()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + return lessThan(cx, lhsString, rhsBigInt, res); + } + + MOZ_ASSERT(lhs.isNumber()); + res = lessThan(lhs.toNumber(), rhs.toBigInt()); + return true; +} + +template +JSLinearString* BigInt::toString(JSContext* cx, HandleBigInt x, uint8_t radix) { + MOZ_ASSERT(2 <= radix && radix <= 36); + + if (x->isZero()) { + return cx->staticStrings().getInt(0); + } + + if (mozilla::IsPowerOfTwo(radix)) { + return toStringBasePowerOfTwo(cx, x, radix); + } + + if (radix == 10 && x->digitLength() == 1) { + return toStringSingleDigitBaseTen(cx, x->digit(0), + x->isNegative()); + } + + // Punt on doing generic toString without GC. + if (!allowGC) { + return nullptr; + } + + return toStringGeneric(cx, x, radix); +} + +template JSLinearString* BigInt::toString(JSContext* cx, + HandleBigInt x, + uint8_t radix); +template JSLinearString* BigInt::toString(JSContext* cx, + HandleBigInt x, + uint8_t radix); + +template +static inline BigInt* ParseStringBigIntLiteral(JSContext* cx, + Range range, + bool* haveParseError) { + auto start = range.begin(); + auto end = range.end(); + + while (start < end && unicode::IsSpace(start[0])) { + start++; + } + + while (start < end && unicode::IsSpace(end[-1])) { + end--; + } + + if (start == end) { + return BigInt::zero(cx); + } + + // StringNumericLiteral ::: StrDecimalLiteral, but without Infinity, decimal + // points, or exponents. Note that the raw '+' or '-' cases fall through + // because the string is too short, and eventually signal a parse error. 
+ if (end - start > 1) { + if (start[0] == '+') { + bool isNegative = false; + start++; + return BigInt::parseLiteralDigits(cx, Range(start, end), 10, + isNegative, haveParseError); + } + if (start[0] == '-') { + bool isNegative = true; + start++; + return BigInt::parseLiteralDigits(cx, Range(start, end), 10, + isNegative, haveParseError); + } + } + + return BigInt::parseLiteral(cx, Range(start, end), + haveParseError); +} + +// Called from BigInt constructor. +JS::Result js::StringToBigInt(JSContext* cx, HandleString str) { + JSLinearString* linear = str->ensureLinear(cx); + if (!linear) { + return cx->alreadyReportedOOM(); + } + + AutoStableStringChars chars(cx); + if (!chars.init(cx, str)) { + return cx->alreadyReportedOOM(); + } + + BigInt* res; + bool parseError = false; + if (chars.isLatin1()) { + res = ParseStringBigIntLiteral(cx, chars.latin1Range(), &parseError); + } else { + res = ParseStringBigIntLiteral(cx, chars.twoByteRange(), &parseError); + } + + // A nullptr result can indicate either a parse error or generic error. + if (!res && !parseError) { + return cx->alreadyReportedError(); + } + + return res; +} + +// Called from parser with already trimmed and validated token. +BigInt* js::ParseBigIntLiteral(JSContext* cx, + const Range& chars) { + // This function is only called from the frontend when parsing BigInts. Parsed + // BigInts are stored in the script's data vector and therefore need to be + // allocated in the tenured heap. + constexpr gc::Heap heap = gc::Heap::Tenured; + + bool parseError = false; + BigInt* res = BigInt::parseLiteral(cx, chars, &parseError, heap); + if (!res) { + return nullptr; + } + MOZ_ASSERT(res->isTenured()); + MOZ_RELEASE_ASSERT(!parseError); + return res; +} + +// Check a already validated numeric literal for a non-zero value. Used by +// the parsers node folder in deferred mode. 
+bool js::BigIntLiteralIsZero(const mozilla::Range& chars) { + return BigInt::literalIsZero(chars); +} + +template +JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi) { + JSString* str = BigInt::toString(cx, bi, 10); + if (!str) { + return nullptr; + } + JSAtom* atom = AtomizeString(cx, str); + if (!atom) { + if constexpr (!allowGC) { + // NOTE: AtomizeString can call ReportAllocationOverflow other than + // ReportOutOfMemory, but ReportAllocationOverflow cannot happen + // because the length is guarded by BigInt::toString. + cx->recoverFromOutOfMemory(); + } + return nullptr; + } + return atom; +} + +template JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi); +template JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi); + +#if defined(DEBUG) || defined(JS_JITSPEW) +void BigInt::dump() const { + js::Fprinter out(stderr); + dump(out); +} + +void BigInt::dump(js::GenericPrinter& out) const { + if (isNegative()) { + out.putChar('-'); + } + + if (digitLength() == 0) { + out.put("0"); + } else if (digitLength() == 1) { + uint64_t d = digit(0); + out.printf("%" PRIu64, d); + } else { + out.put("0x"); + for (size_t i = 0; i < digitLength(); i++) { + uint64_t d = digit(digitLength() - i - 1); + if (sizeof(Digit) == 4) { + out.printf("%.8" PRIX32, uint32_t(d)); + } else { + out.printf("%.16" PRIX64, d); + } + } + } + + out.putChar('n'); +} +#endif + +JS::ubi::Node::Size JS::ubi::Concrete::size( + mozilla::MallocSizeOf mallocSizeOf) const { + BigInt& bi = get(); + size_t size = sizeof(JS::BigInt); + if (IsInsideNursery(&bi)) { + size += Nursery::nurseryCellHeaderSize(); + size += bi.sizeOfExcludingThisInNursery(mallocSizeOf); + } else { + size += bi.sizeOfExcludingThis(mallocSizeOf); + } + return size; +} + +// Public API + +BigInt* JS::NumberToBigInt(JSContext* cx, double num) { + return js::NumberToBigInt(cx, num); +} + +template +static inline BigInt* StringToBigIntHelper(JSContext* cx, + Range& chars) { + bool parseError = false; + BigInt* bi = 
ParseStringBigIntLiteral(cx, chars, &parseError); + if (!bi) { + if (parseError) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + } + return nullptr; + } + MOZ_RELEASE_ASSERT(!parseError); + return bi; +} + +BigInt* JS::StringToBigInt(JSContext* cx, Range chars) { + return StringToBigIntHelper(cx, chars); +} + +BigInt* JS::StringToBigInt(JSContext* cx, Range chars) { + return StringToBigIntHelper(cx, chars); +} + +static inline BigInt* SimpleStringToBigIntHelper( + JSContext* cx, mozilla::Span chars, uint8_t radix, + bool* haveParseError) { + if (chars.Length() > 1) { + if (chars[0] == '+') { + return BigInt::parseLiteralDigits( + cx, Range{chars.From(1)}, radix, + /* isNegative = */ false, haveParseError); + } + if (chars[0] == '-') { + return BigInt::parseLiteralDigits( + cx, Range{chars.From(1)}, radix, + /* isNegative = */ true, haveParseError); + } + } + + return BigInt::parseLiteralDigits(cx, Range{chars}, radix, + /* isNegative = */ false, haveParseError); +} + +BigInt* JS::SimpleStringToBigInt(JSContext* cx, mozilla::Span chars, + uint8_t radix) { + if (chars.empty()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + return nullptr; + } + if (radix < 2 || radix > 36) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_RADIX); + return nullptr; + } + + mozilla::Span latin1{ + reinterpret_cast(chars.data()), chars.size()}; + bool haveParseError = false; + BigInt* bi = SimpleStringToBigIntHelper(cx, latin1, radix, &haveParseError); + if (!bi) { + if (haveParseError) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + } + return nullptr; + } + MOZ_RELEASE_ASSERT(!haveParseError); + return bi; +} + +BigInt* JS::ToBigInt(JSContext* cx, HandleValue val) { + return js::ToBigInt(cx, val); +} + +int64_t JS::ToBigInt64(JS::BigInt* bi) { return BigInt::toInt64(bi); } + +uint64_t JS::ToBigUint64(JS::BigInt* bi) { return 
BigInt::toUint64(bi); } + +double JS::BigIntToNumber(JS::BigInt* bi) { return BigInt::numberValue(bi); } + +bool JS::BigIntIsNegative(BigInt* bi) { + return !bi->isZero() && bi->isNegative(); +} + +bool JS::BigIntFitsNumber(BigInt* bi, double* out) { + return bi->isNumber(bi, out); +} + +JSString* JS::BigIntToString(JSContext* cx, Handle bi, uint8_t radix) { + if (radix < 2 || radix > 36) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_RADIX); + return nullptr; + } + return BigInt::toString(cx, bi, radix); +} + +// Semi-public template details + +BigInt* JS::detail::BigIntFromInt64(JSContext* cx, int64_t num) { + return BigInt::createFromInt64(cx, num); +} + +BigInt* JS::detail::BigIntFromUint64(JSContext* cx, uint64_t num) { + return BigInt::createFromUint64(cx, num); +} + +BigInt* JS::detail::BigIntFromBool(JSContext* cx, bool b) { + return b ? BigInt::one(cx) : BigInt::zero(cx); +} + +bool JS::detail::BigIntIsInt64(BigInt* bi, int64_t* result) { + return BigInt::isInt64(bi, result); +} + +bool JS::detail::BigIntIsUint64(BigInt* bi, uint64_t* result) { + return BigInt::isUint64(bi, result); +} diff --git a/js/src/vm/BigIntType.h b/js/src/vm/BigIntType.h new file mode 100644 index 0000000000..c8e264b20b --- /dev/null +++ b/js/src/vm/BigIntType.h @@ -0,0 +1,481 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BigIntType_h +#define vm_BigIntType_h + +#include "mozilla/Assertions.h" +#include "mozilla/OperatorNewExtensions.h" +#include "mozilla/Range.h" +#include "mozilla/Span.h" + +#include "jstypes.h" + +#include "gc/Allocator.h" +#include "gc/Cell.h" +#include "gc/StoreBuffer.h" +#include "js/Result.h" +#include "js/RootingAPI.h" +#include "js/TraceKind.h" +#include "js/TypeDecls.h" + +namespace js { + +namespace gc { +class TenuringTracer; +} // namespace gc + +namespace jit { +class MacroAssembler; +} // namespace jit + +} // namespace js + +namespace JS { + +class JS_PUBLIC_API BigInt; + +class BigInt final : public js::gc::CellWithLengthAndFlags { + friend class js::gc::CellAllocator; + + BigInt() = default; + + public: + using Digit = uintptr_t; + + private: + // The low CellFlagBitsReservedForGC flag bits are reserved. + static constexpr uintptr_t SignBit = + js::Bit(js::gc::CellFlagBitsReservedForGC); + + static constexpr size_t InlineDigitsLength = + (js::gc::MinCellSize - sizeof(CellWithLengthAndFlags)) / sizeof(Digit); + + public: + // The number of digits and the flags are stored in the cell header. + size_t digitLength() const { return headerLengthField(); } + + private: + // The digit storage starts with the least significant digit (little-endian + // digit order). Byte order within a digit is of course native endian. + union { + Digit* heapDigits_; + Digit inlineDigits_[InlineDigitsLength]; + }; + + void setLengthAndFlags(uint32_t len, uint32_t flags) { + setHeaderLengthAndFlags(len, flags); + } + + public: + static const JS::TraceKind TraceKind = JS::TraceKind::BigInt; + + void fixupAfterMovingGC() {} + + js::gc::AllocKind getAllocKind() const { return js::gc::AllocKind::BIGINT; } + + // Offset for direct access from JIT code. 
+ static constexpr size_t offsetOfDigitLength() { + return offsetOfHeaderLength(); + } + + bool hasInlineDigits() const { return digitLength() <= InlineDigitsLength; } + bool hasHeapDigits() const { return !hasInlineDigits(); } + + using Digits = mozilla::Span; + Digits digits() { + return Digits(hasInlineDigits() ? inlineDigits_ : heapDigits_, + digitLength()); + } + using ConstDigits = mozilla::Span; + ConstDigits digits() const { + return ConstDigits(hasInlineDigits() ? inlineDigits_ : heapDigits_, + digitLength()); + } + Digit digit(size_t idx) const { return digits()[idx]; } + void setDigit(size_t idx, Digit digit) { digits()[idx] = digit; } + + bool isZero() const { return digitLength() == 0; } + bool isNegative() const { return headerFlagsField() & SignBit; } + + void initializeDigitsToZero(); + + void traceChildren(JSTracer* trc); + + static MOZ_ALWAYS_INLINE void postWriteBarrier(void* cellp, BigInt* prev, + BigInt* next) { + js::gc::PostWriteBarrierImpl(cellp, prev, next); + } + + void finalize(JS::GCContext* gcx); + js::HashNumber hash() const; + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; + size_t sizeOfExcludingThisInNursery(mozilla::MallocSizeOf mallocSizeOf) const; + + static BigInt* createUninitialized(JSContext* cx, size_t digitLength, + bool isNegative, + js::gc::Heap heap = js::gc::Heap::Default); + static BigInt* createFromDouble(JSContext* cx, double d); + static BigInt* createFromUint64(JSContext* cx, uint64_t n); + static BigInt* createFromInt64(JSContext* cx, int64_t n); + static BigInt* createFromDigit(JSContext* cx, Digit d, bool isNegative); + static BigInt* createFromNonZeroRawUint64(JSContext* cx, uint64_t n, + bool isNegative); + // FIXME: Cache these values. 
+ static BigInt* zero(JSContext* cx, js::gc::Heap heap = js::gc::Heap::Default); + static BigInt* one(JSContext* cx); + static BigInt* negativeOne(JSContext* cx); + + static BigInt* copy(JSContext* cx, Handle x, + js::gc::Heap heap = js::gc::Heap::Default); + static BigInt* add(JSContext* cx, Handle x, Handle y); + static BigInt* sub(JSContext* cx, Handle x, Handle y); + static BigInt* mul(JSContext* cx, Handle x, Handle y); + static BigInt* div(JSContext* cx, Handle x, Handle y); + static BigInt* mod(JSContext* cx, Handle x, Handle y); + static BigInt* pow(JSContext* cx, Handle x, Handle y); + static BigInt* neg(JSContext* cx, Handle x); + static BigInt* inc(JSContext* cx, Handle x); + static BigInt* dec(JSContext* cx, Handle x); + static BigInt* lsh(JSContext* cx, Handle x, Handle y); + static BigInt* rsh(JSContext* cx, Handle x, Handle y); + static BigInt* bitAnd(JSContext* cx, Handle x, Handle y); + static BigInt* bitXor(JSContext* cx, Handle x, Handle y); + static BigInt* bitOr(JSContext* cx, Handle x, Handle y); + static BigInt* bitNot(JSContext* cx, Handle x); + + static int64_t toInt64(const BigInt* x); + static uint64_t toUint64(const BigInt* x); + + // Return true if the BigInt is without loss of precision representable as an + // int64 and store the int64 value in the output. Otherwise return false and + // leave the value of the output parameter unspecified. + static bool isInt64(BigInt* x, int64_t* result); + + // Return true if the BigInt is without loss of precision representable as an + // uint64 and store the uint64 value in the output. Otherwise return false and + // leave the value of the output parameter unspecified. + static bool isUint64(BigInt* x, uint64_t* result); + + // Return true if the BigInt is without loss of precision representable as a + // JS Number (double) and store the double value in the output. Otherwise + // return false and leave the value of the output parameter unspecified. 
+ static bool isNumber(BigInt* x, double* result); + + static BigInt* asIntN(JSContext* cx, Handle x, uint64_t bits); + static BigInt* asUintN(JSContext* cx, Handle x, uint64_t bits); + + // Type-checking versions of arithmetic operations. These methods + // must be called with at least one BigInt operand. Binary + // operations will throw a TypeError if one of the operands is not a + // BigInt value. + static bool addValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool subValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool mulValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool divValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool modValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool powValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool negValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool incValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool decValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool lshValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool rshValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitAndValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitXorValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitOrValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitNotValue(JSContext* cx, Handle operand, + MutableHandle res); + + static double numberValue(BigInt* x); + + template + static JSLinearString* toString(JSContext* cx, Handle x, + uint8_t radix); + template + static BigInt* parseLiteral(JSContext* cx, + const mozilla::Range chars, + bool* haveParseError, + js::gc::Heap heap = js::gc::Heap::Default); + template + static BigInt* parseLiteralDigits(JSContext* cx, + 
const mozilla::Range chars, + unsigned radix, bool isNegative, + bool* haveParseError, + js::gc::Heap heap = js::gc::Heap::Default); + + template + static bool literalIsZero(const mozilla::Range chars); + + static int8_t compare(BigInt* lhs, BigInt* rhs); + static bool equal(BigInt* lhs, BigInt* rhs); + static bool equal(BigInt* lhs, double rhs); + static JS::Result equal(JSContext* cx, Handle lhs, + HandleString rhs); + static JS::Result looselyEqual(JSContext* cx, Handle lhs, + HandleValue rhs); + + static bool lessThan(BigInt* x, BigInt* y); + // These methods return Nothing when the non-BigInt operand is NaN + // or a string that can't be interpreted as a BigInt. + static mozilla::Maybe lessThan(BigInt* lhs, double rhs); + static mozilla::Maybe lessThan(double lhs, BigInt* rhs); + static bool lessThan(JSContext* cx, Handle lhs, HandleString rhs, + mozilla::Maybe& res); + static bool lessThan(JSContext* cx, HandleString lhs, Handle rhs, + mozilla::Maybe& res); + static bool lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs, + mozilla::Maybe& res); + +#if defined(DEBUG) || defined(JS_JITSPEW) + void dump() const; // Debugger-friendly stderr dump. + void dump(js::GenericPrinter& out) const; +#endif + + public: + static constexpr size_t DigitBits = sizeof(Digit) * CHAR_BIT; + + private: + static constexpr size_t HalfDigitBits = DigitBits / 2; + static constexpr Digit HalfDigitMask = (1ull << HalfDigitBits) - 1; + + static_assert(DigitBits == 32 || DigitBits == 64, + "Unexpected BigInt Digit size"); + + // Limit the size of bigint values to 1 million bits, to prevent excessive + // memory usage. This limit may be raised in the future if needed. Note + // however that there are many parts of the implementation that rely on being + // able to count and index bits using a 32-bit signed ints, so until those + // sites are fixed, the practical limit is 0x7fffffff bits. 
+ static constexpr size_t MaxBitLength = 1024 * 1024; + static constexpr size_t MaxDigitLength = MaxBitLength / DigitBits; + + // BigInts can be serialized to strings of radix between 2 and 36. For a + // given bigint, radix 2 will take the most characters (one per bit). + // Ensure that the max bigint size is small enough so that we can fit the + // corresponding character count into a size_t, with space for a possible + // sign prefix. + static_assert(MaxBitLength <= std::numeric_limits::max() - 1, + "BigInt max length must be small enough to be serialized as a " + "binary string"); + + static size_t calculateMaximumCharactersRequired(HandleBigInt x, + unsigned radix); + [[nodiscard]] static bool calculateMaximumDigitsRequired(JSContext* cx, + uint8_t radix, + size_t charCount, + size_t* result); + + static bool absoluteDivWithDigitDivisor( + JSContext* cx, Handle x, Digit divisor, + const mozilla::Maybe>& quotient, Digit* remainder, + bool quotientNegative); + static void internalMultiplyAdd(BigInt* source, Digit factor, Digit summand, + unsigned, BigInt* result); + static void multiplyAccumulate(BigInt* multiplicand, Digit multiplier, + BigInt* accumulator, + unsigned accumulatorIndex); + static bool absoluteDivWithBigIntDivisor( + JSContext* cx, Handle dividend, Handle divisor, + const mozilla::Maybe>& quotient, + const mozilla::Maybe>& remainder, + bool quotientNegative); + + enum class LeftShiftMode { SameSizeResult, AlwaysAddOneDigit }; + + static BigInt* absoluteLeftShiftAlwaysCopy(JSContext* cx, Handle x, + unsigned shift, LeftShiftMode); + static bool productGreaterThan(Digit factor1, Digit factor2, Digit high, + Digit low); + static BigInt* lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y); + static BigInt* rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y); + static BigInt* rshByMaximum(JSContext* cx, bool isNegative); + static BigInt* truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x, + uint64_t bits, + bool 
resultNegative); + + Digit absoluteInplaceAdd(BigInt* summand, unsigned startIndex); + Digit absoluteInplaceSub(BigInt* subtrahend, unsigned startIndex); + void inplaceRightShiftLowZeroBits(unsigned shift); + void inplaceMultiplyAdd(Digit multiplier, Digit part); + + // The result of an SymmetricTrim bitwise op has as many digits as the + // smaller operand. A SymmetricFill bitwise op result has as many digits as + // the larger operand, with high digits (if any) copied from the larger + // operand. AsymmetricFill is like SymmetricFill, except the result has as + // many digits as the first operand; this kind is used for the and-not + // operation. + enum class BitwiseOpKind { SymmetricTrim, SymmetricFill, AsymmetricFill }; + + template + static BigInt* absoluteBitwiseOp(JSContext* cx, Handle x, + Handle y, BitwiseOp&& op); + + // Return `|x| & |y|`. + static BigInt* absoluteAnd(JSContext* cx, Handle x, + Handle y); + + // Return `|x| | |y|`. + static BigInt* absoluteOr(JSContext* cx, Handle x, + Handle y); + + // Return `|x| & ~|y|`. + static BigInt* absoluteAndNot(JSContext* cx, Handle x, + Handle y); + + // Return `|x| ^ |y|`. + static BigInt* absoluteXor(JSContext* cx, Handle x, + Handle y); + + // Return `(|x| + 1) * (resultNegative ? -1 : +1)`. + static BigInt* absoluteAddOne(JSContext* cx, Handle x, + bool resultNegative); + + // Return `(|x| - 1) * (resultNegative ? -1 : +1)`, with the precondition that + // |x| != 0. + static BigInt* absoluteSubOne(JSContext* cx, Handle x, + bool resultNegative = false); + + // Return `a + b`, incrementing `*carry` if the addition overflows. + static inline Digit digitAdd(Digit a, Digit b, Digit* carry) { + Digit result = a + b; + *carry += static_cast(result < a); + return result; + } + + // Return `left - right`, incrementing `*borrow` if the addition overflows. 
+ static inline Digit digitSub(Digit left, Digit right, Digit* borrow) { + Digit result = left - right; + *borrow += static_cast(result > left); + return result; + } + + // Compute `a * b`, returning the low half of the result and putting the + // high half in `*high`. + static Digit digitMul(Digit a, Digit b, Digit* high); + + // Divide `(high << DigitBits) + low` by `divisor`, returning the quotient + // and storing the remainder in `*remainder`, with the precondition that + // `high < divisor` so that the result fits in a Digit. + static Digit digitDiv(Digit high, Digit low, Digit divisor, Digit* remainder); + + // Return `(|x| + |y|) * (resultNegative ? -1 : +1)`. + static BigInt* absoluteAdd(JSContext* cx, Handle x, + Handle y, bool resultNegative); + + // Return `(|x| - |y|) * (resultNegative ? -1 : +1)`, with the precondition + // that |x| >= |y|. + static BigInt* absoluteSub(JSContext* cx, Handle x, + Handle y, bool resultNegative); + + // If `|x| < |y|` return -1; if `|x| == |y|` return 0; otherwise return 1. 
+ static int8_t absoluteCompare(BigInt* lhs, BigInt* rhs); + + static int8_t compare(BigInt* lhs, double rhs); + + template + static JSLinearString* toStringBasePowerOfTwo(JSContext* cx, Handle, + unsigned radix); + template + static JSLinearString* toStringSingleDigitBaseTen(JSContext* cx, Digit digit, + bool isNegative); + static JSLinearString* toStringGeneric(JSContext* cx, Handle, + unsigned radix); + + static BigInt* destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x); + + bool absFitsInUint64() const { return digitLength() <= 64 / DigitBits; } + + uint64_t uint64FromAbsNonZero() const { + MOZ_ASSERT(!isZero()); + + uint64_t val = digit(0); + if (DigitBits == 32 && digitLength() > 1) { + val |= static_cast(digit(1)) << 32; + } + return val; + } + + friend struct ::JSStructuredCloneReader; + friend struct ::JSStructuredCloneWriter; + + BigInt(const BigInt& other) = delete; + void operator=(const BigInt& other) = delete; + + public: + static constexpr size_t offsetOfFlags() { return offsetOfHeaderFlags(); } + static constexpr size_t offsetOfLength() { return offsetOfHeaderLength(); } + + static constexpr size_t signBitMask() { return SignBit; } + + private: + // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler to + // call the methods below. 
+ friend class js::jit::MacroAssembler; + + static size_t offsetOfInlineDigits() { + return offsetof(BigInt, inlineDigits_); + } + + static size_t offsetOfHeapDigits() { return offsetof(BigInt, heapDigits_); } + + static constexpr size_t inlineDigitsLength() { return InlineDigitsLength; } + + private: + friend class js::gc::TenuringTracer; +}; + +static_assert( + sizeof(BigInt) >= js::gc::MinCellSize, + "sizeof(BigInt) must be greater than the minimum allocation size"); + +static_assert( + sizeof(BigInt) == js::gc::MinCellSize, + "sizeof(BigInt) intended to be the same as the minimum allocation size"); + +} // namespace JS + +namespace js { + +template +extern JSAtom* BigIntToAtom(JSContext* cx, JS::HandleBigInt bi); + +extern JS::BigInt* NumberToBigInt(JSContext* cx, double d); + +// Parse a BigInt from a string, using the method specified for StringToBigInt. +// Used by the BigInt constructor among other places. +extern JS::Result StringToBigInt(JSContext* cx, + JS::Handle str); + +// Parse a BigInt from an already-validated numeric literal. Used by the +// parser. Can only fail in out-of-memory situations. +extern JS::BigInt* ParseBigIntLiteral( + JSContext* cx, const mozilla::Range& chars); + +// Check an already validated numeric literal for a non-zero value. Used by +// the parsers node folder in deferred mode. +extern bool BigIntLiteralIsZero(const mozilla::Range& chars); + +extern JS::BigInt* ToBigInt(JSContext* cx, JS::Handle v); +extern JS::Result ToBigInt64(JSContext* cx, JS::Handle v); +extern JS::Result ToBigUint64(JSContext* cx, JS::Handle v); + +} // namespace js + +#endif diff --git a/js/src/vm/BindingKind.h b/js/src/vm/BindingKind.h new file mode 100644 index 0000000000..793aa7e82f --- /dev/null +++ b/js/src/vm/BindingKind.h @@ -0,0 +1,111 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BindingKind_h +#define vm_BindingKind_h + +#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF +#include "mozilla/Casting.h" // mozilla::AssertedCast + +#include // uint16_t, uint32_t + +#include "vm/BytecodeUtil.h" // LOCALNO_LIMIT, ENVCOORD_SLOT_LIMIT + +namespace js { + +enum class BindingKind : uint8_t { + Import, + FormalParameter, + Var, + Let, + Const, + + // So you think named lambda callee names are consts? Nope! They don't + // throw when being assigned to in sloppy mode. + NamedLambdaCallee, + + // ClassBodyScope bindings that aren't bindings in the spec, but are put into + // a scope as an implementation detail: `.privateBrand`, + // `.staticInitializers`, private names, and private accessor functions. + Synthetic, + + // ClassBodyScope binding that stores the function object for a non-static + // private method. + PrivateMethod, +}; + +static inline bool BindingKindIsLexical(BindingKind kind) { + return kind == BindingKind::Let || kind == BindingKind::Const; +} + +class BindingLocation { + public: + enum class Kind { + Global, + Argument, + Frame, + Environment, + Import, + NamedLambdaCallee + }; + + private: + Kind kind_; + uint32_t slot_; + + BindingLocation(Kind kind, uint32_t slot) : kind_(kind), slot_(slot) {} + + public: + static BindingLocation Global() { + return BindingLocation(Kind::Global, UINT32_MAX); + } + + static BindingLocation Argument(uint16_t slot) { + return BindingLocation(Kind::Argument, slot); + } + + static BindingLocation Frame(uint32_t slot) { + MOZ_ASSERT(slot < LOCALNO_LIMIT); + return BindingLocation(Kind::Frame, slot); + } + + static BindingLocation Environment(uint32_t slot) { + MOZ_ASSERT(slot < ENVCOORD_SLOT_LIMIT); + return BindingLocation(Kind::Environment, slot); + } + + static BindingLocation Import() { + return BindingLocation(Kind::Import, UINT32_MAX); + } + + static 
BindingLocation NamedLambdaCallee() { + return BindingLocation(Kind::NamedLambdaCallee, UINT32_MAX); + } + + bool operator==(const BindingLocation& other) const { + return kind_ == other.kind_ && slot_ == other.slot_; + } + + bool operator!=(const BindingLocation& other) const { + return !operator==(other); + } + + Kind kind() const { return kind_; } + + uint32_t slot() const { + MOZ_ASSERT(kind_ == Kind::Frame || kind_ == Kind::Environment); + return slot_; + } + + uint16_t argumentSlot() const { + MOZ_ASSERT(kind_ == Kind::Argument); + return mozilla::AssertedCast(slot_); + } +}; + +} // namespace js + +#endif // vm_BindingKind_h diff --git a/js/src/vm/BooleanObject-inl.h b/js/src/vm/BooleanObject-inl.h new file mode 100644 index 0000000000..a8d9376403 --- /dev/null +++ b/js/src/vm/BooleanObject-inl.h @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BooleanObject_inl_h +#define vm_BooleanObject_inl_h + +#include "vm/BooleanObject.h" + +#include "vm/JSObject-inl.h" + +namespace js { + +inline BooleanObject* BooleanObject::create( + JSContext* cx, bool b, HandleObject proto /* = nullptr */) { + BooleanObject* obj = NewObjectWithClassProto(cx, proto); + if (!obj) { + return nullptr; + } + obj->setPrimitiveValue(b); + return obj; +} + +} // namespace js + +#endif /* vm_BooleanObject_inl_h */ diff --git a/js/src/vm/BooleanObject.h b/js/src/vm/BooleanObject.h new file mode 100644 index 0000000000..123f255d6a --- /dev/null +++ b/js/src/vm/BooleanObject.h @@ -0,0 +1,44 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BooleanObject_h +#define vm_BooleanObject_h + +#include "vm/NativeObject.h" + +namespace js { + +class BooleanObject : public NativeObject { + /* Stores this Boolean object's [[PrimitiveValue]]. */ + static const unsigned PRIMITIVE_VALUE_SLOT = 0; + + static const ClassSpec classSpec_; + + public: + static const unsigned RESERVED_SLOTS = 1; + + static const JSClass class_; + + /* + * Creates a new Boolean object boxing the given primitive bool. + * If proto is nullptr, the [[Prototype]] will default to Boolean.prototype. 
+ */ + static inline BooleanObject* create(JSContext* cx, bool b, + HandleObject proto = nullptr); + + bool unbox() const { return getFixedSlot(PRIMITIVE_VALUE_SLOT).toBoolean(); } + + private: + static JSObject* createPrototype(JSContext* cx, JSProtoKey key); + + inline void setPrimitiveValue(bool b) { + setFixedSlot(PRIMITIVE_VALUE_SLOT, BooleanValue(b)); + } +}; + +} // namespace js + +#endif /* vm_BooleanObject_h */ diff --git a/js/src/vm/BoundFunctionObject.cpp b/js/src/vm/BoundFunctionObject.cpp new file mode 100644 index 0000000000..6d11611f90 --- /dev/null +++ b/js/src/vm/BoundFunctionObject.cpp @@ -0,0 +1,534 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/BoundFunctionObject.h" + +#include + +#include "util/StringBuffer.h" +#include "vm/Interpreter.h" +#include "vm/Shape.h" +#include "vm/Stack.h" + +#include "gc/ObjectKind-inl.h" +#include "vm/JSFunction-inl.h" +#include "vm/JSObject-inl.h" +#include "vm/NativeObject-inl.h" +#include "vm/Shape-inl.h" + +using namespace js; + +// Helper function to initialize `args` with all bound arguments + the arguments +// supplied in `callArgs`. 
+template +static MOZ_ALWAYS_INLINE void FillArguments(Args& args, + BoundFunctionObject* bound, + size_t numBoundArgs, + const CallArgs& callArgs) { + MOZ_ASSERT(args.length() == numBoundArgs + callArgs.length()); + + if (numBoundArgs <= BoundFunctionObject::MaxInlineBoundArgs) { + for (size_t i = 0; i < numBoundArgs; i++) { + args[i].set(bound->getInlineBoundArg(i)); + } + } else { + ArrayObject* boundArgs = bound->getBoundArgsArray(); + for (size_t i = 0; i < numBoundArgs; i++) { + args[i].set(boundArgs->getDenseElement(i)); + } + } + + for (size_t i = 0; i < callArgs.length(); i++) { + args[numBoundArgs + i].set(callArgs[i]); + } +} + +// ES2023 10.4.1.1 [[Call]] +// https://tc39.es/ecma262/#sec-bound-function-exotic-objects-call-thisargument-argumentslist +// static +bool BoundFunctionObject::call(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + Rooted bound(cx, + &args.callee().as()); + + // Step 1. + Rooted target(cx, bound->getTargetVal()); + + // Step 2. + Rooted boundThis(cx, bound->getBoundThis()); + + // Steps 3-4. + size_t numBoundArgs = bound->numBoundArgs(); + InvokeArgs args2(cx); + if (!args2.init(cx, uint64_t(numBoundArgs) + args.length())) { + return false; + } + FillArguments(args2, bound, numBoundArgs, args); + + // Step 5. + return Call(cx, target, boundThis, args2, args.rval()); +} + +// ES2023 10.4.1.2 [[Construct]] +// https://tc39.es/ecma262/#sec-bound-function-exotic-objects-construct-argumentslist-newtarget +// static +bool BoundFunctionObject::construct(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + Rooted bound(cx, + &args.callee().as()); + + MOZ_ASSERT(bound->isConstructor(), + "shouldn't have called this hook if not a constructor"); + + // Step 1. + Rooted target(cx, bound->getTargetVal()); + + // Step 2. + MOZ_ASSERT(IsConstructor(target)); + + // Steps 3-4. 
+ size_t numBoundArgs = bound->numBoundArgs(); + ConstructArgs args2(cx); + if (!args2.init(cx, uint64_t(numBoundArgs) + args.length())) { + return false; + } + FillArguments(args2, bound, numBoundArgs, args); + + // Step 5. + Rooted newTarget(cx, args.newTarget()); + if (newTarget == ObjectValue(*bound)) { + newTarget = target; + } + + // Step 6. + Rooted res(cx); + if (!Construct(cx, target, args2, newTarget, &res)) { + return false; + } + args.rval().setObject(*res); + return true; +} + +// static +JSString* BoundFunctionObject::funToString(JSContext* cx, Handle obj, + bool isToSource) { + // Implementation of the funToString hook used by Function.prototype.toString. + + // For the non-standard toSource extension, we include "bound" to indicate + // it's a bound function. + if (isToSource) { + static constexpr std::string_view nativeCodeBound = + "function bound() {\n [native code]\n}"; + return NewStringCopy(cx, nativeCodeBound); + } + + static constexpr std::string_view nativeCode = + "function() {\n [native code]\n}"; + return NewStringCopy(cx, nativeCode); +} + +// static +SharedShape* BoundFunctionObject::assignInitialShape( + JSContext* cx, Handle obj) { + MOZ_ASSERT(obj->empty()); + + constexpr PropertyFlags propFlags = {PropertyFlag::Configurable}; + if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().length, + LengthSlot, propFlags)) { + return nullptr; + } + if (!NativeObject::addPropertyInReservedSlot(cx, obj, cx->names().name, + NameSlot, propFlags)) { + return nullptr; + } + + SharedShape* shape = obj->sharedShape(); + if (shape->proto() == TaggedProto(&cx->global()->getFunctionPrototype())) { + cx->global()->setBoundFunctionShapeWithDefaultProto(shape); + } + return shape; +} + +static MOZ_ALWAYS_INLINE bool ComputeLengthValue( + JSContext* cx, Handle bound, Handle target, + size_t numBoundArgs, double* length) { + *length = 0.0; + + // Try to avoid invoking the JSFunction resolve hook. 
+ if (target->is() && + !target->as().hasResolvedLength()) { + uint16_t targetLength; + if (!JSFunction::getUnresolvedLength(cx, target.as(), + &targetLength)) { + return false; + } + + if (size_t(targetLength) > numBoundArgs) { + *length = size_t(targetLength) - numBoundArgs; + } + return true; + } + + // Use a fast path for getting the .length value if the target is a bound + // function with its initial shape. + Value targetLength; + if (target->is() && target->shape() == bound->shape()) { + BoundFunctionObject* targetFn = &target->as(); + targetLength = targetFn->getLengthForInitialShape(); + } else { + bool hasLength; + Rooted key(cx, NameToId(cx->names().length)); + if (!HasOwnProperty(cx, target, key, &hasLength)) { + return false; + } + + if (!hasLength) { + return true; + } + + Rooted targetLengthRoot(cx); + if (!GetProperty(cx, target, target, key, &targetLengthRoot)) { + return false; + } + targetLength = targetLengthRoot; + } + + if (targetLength.isNumber()) { + *length = std::max( + 0.0, JS::ToInteger(targetLength.toNumber()) - double(numBoundArgs)); + } + return true; +} + +static MOZ_ALWAYS_INLINE JSAtom* AppendBoundFunctionPrefix(JSContext* cx, + JSString* str) { + auto& cache = cx->zone()->boundPrefixCache(); + + JSAtom* strAtom = str->isAtom() ? &str->asAtom() : nullptr; + if (strAtom) { + if (auto p = cache.lookup(strAtom)) { + return p->value(); + } + } + + StringBuffer sb(cx); + if (!sb.append("bound ") || !sb.append(str)) { + return nullptr; + } + JSAtom* atom = sb.finishAtom(); + if (!atom) { + return nullptr; + } + + if (strAtom) { + (void)cache.putNew(strAtom, atom); + } + return atom; +} + +static MOZ_ALWAYS_INLINE JSAtom* ComputeNameValue( + JSContext* cx, Handle bound, + Handle target) { + // Try to avoid invoking the JSFunction resolve hook. 
+ JSString* name = nullptr; + if (target->is() && !target->as().hasResolvedName()) { + JSFunction* targetFn = &target->as(); + name = targetFn->infallibleGetUnresolvedName(cx); + } else { + // Use a fast path for getting the .name value if the target is a bound + // function with its initial shape. + Value targetName; + if (target->is() && + target->shape() == bound->shape()) { + BoundFunctionObject* targetFn = &target->as(); + targetName = targetFn->getNameForInitialShape(); + } else { + Rooted targetNameRoot(cx); + if (!GetProperty(cx, target, target, cx->names().name, &targetNameRoot)) { + return nullptr; + } + targetName = targetNameRoot; + } + if (!targetName.isString()) { + return cx->names().boundWithSpace; + } + name = targetName.toString(); + } + + return AppendBoundFunctionPrefix(cx, name); +} + +// ES2023 20.2.3.2 Function.prototype.bind +// https://tc39.es/ecma262/#sec-function.prototype.bind +// static +bool BoundFunctionObject::functionBind(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Steps 1-2. + if (!IsCallable(args.thisv())) { + ReportIncompatibleMethod(cx, args, &FunctionClass); + return false; + } + + if (MOZ_UNLIKELY(args.length() > ARGS_LENGTH_MAX)) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TOO_MANY_ARGUMENTS); + return false; + } + + Rooted target(cx, &args.thisv().toObject()); + + BoundFunctionObject* bound = + functionBindImpl(cx, target, args.array(), args.length(), nullptr); + if (!bound) { + return false; + } + + // Step 11. + args.rval().setObject(*bound); + return true; +} + +// ES2023 20.2.3.2 Function.prototype.bind +// https://tc39.es/ecma262/#sec-function.prototype.bind +// +// ES2023 10.4.1.3 BoundFunctionCreate +// https://tc39.es/ecma262/#sec-boundfunctioncreate +// +// BoundFunctionCreate has been inlined in Function.prototype.bind for +// performance reasons. 
+// +// static +BoundFunctionObject* BoundFunctionObject::functionBindImpl( + JSContext* cx, Handle target, Value* args, uint32_t argc, + Handle maybeBound) { + MOZ_ASSERT(target->isCallable()); + + // Make sure the arguments on the stack are rooted when we're called directly + // from JIT code. + RootedExternalValueArray argsRoot(cx, argc, args); + + size_t numBoundArgs = argc > 0 ? argc - 1 : 0; + MOZ_ASSERT(numBoundArgs <= ARGS_LENGTH_MAX, "ensured by callers"); + + // If this assertion fails, make sure we use the correct AllocKind and that we + // use all of its slots (consider increasing MaxInlineBoundArgs). + static_assert(gc::GetGCKindSlots(allocKind) == SlotCount); + + // ES2023 10.4.1.3 BoundFunctionCreate + // Steps 1-5. + Rooted bound(cx); + if (maybeBound) { + // We allocated a bound function in JIT code. In the uncommon case of the + // target not having Function.prototype as proto, we have to set the right + // proto here. + bound = maybeBound; + if (MOZ_UNLIKELY(bound->staticPrototype() != target->staticPrototype())) { + Rooted proto(cx, target->staticPrototype()); + if (!SetPrototype(cx, bound, proto)) { + return nullptr; + } + } + } else { + // Step 1. + Rooted proto(cx); + if (!GetPrototype(cx, target, &proto)) { + return nullptr; + } + + // Steps 2-5. + if (proto == &cx->global()->getFunctionPrototype() && + cx->global()->maybeBoundFunctionShapeWithDefaultProto()) { + Rooted shape( + cx, cx->global()->maybeBoundFunctionShapeWithDefaultProto()); + JSObject* obj = + NativeObject::create(cx, allocKind, gc::Heap::Default, shape); + if (!obj) { + return nullptr; + } + bound = &obj->as(); + } else { + bound = NewObjectWithGivenProto(cx, proto); + if (!bound) { + return nullptr; + } + if (!SharedShape::ensureInitialCustomShape(cx, + bound)) { + return nullptr; + } + } + } + + MOZ_ASSERT(bound->lookupPure(cx->names().length)->slot() == LengthSlot); + MOZ_ASSERT(bound->lookupPure(cx->names().name)->slot() == NameSlot); + + // Steps 6 and 9. 
+ bound->initFlags(numBoundArgs, target->isConstructor()); + + // Step 7. + bound->initReservedSlot(TargetSlot, ObjectValue(*target)); + + // Step 8. + if (argc > 0) { + bound->initReservedSlot(BoundThisSlot, args[0]); + } + + if (numBoundArgs <= MaxInlineBoundArgs) { + for (size_t i = 0; i < numBoundArgs; i++) { + bound->initReservedSlot(BoundArg0Slot + i, args[i + 1]); + } + } else { + ArrayObject* arr = NewDenseCopiedArray(cx, numBoundArgs, args + 1); + if (!arr) { + return nullptr; + } + bound->initReservedSlot(BoundArg0Slot, ObjectValue(*arr)); + } + + // ES2023 20.2.3.2 Function.prototype.bind + // Step 4. + double length = 0.0; + + // Steps 5-6. + if (!ComputeLengthValue(cx, bound, target, numBoundArgs, &length)) { + return nullptr; + } + + // Step 7. + bound->initLength(length); + + // Steps 8-9. + JSAtom* name = ComputeNameValue(cx, bound, target); + if (!name) { + return nullptr; + } + + // Step 10. + bound->initName(name); + + // Step 11. + return bound; +} + +// static +BoundFunctionObject* BoundFunctionObject::createWithTemplate( + JSContext* cx, Handle templateObj) { + Rooted shape(cx, templateObj->sharedShape()); + JSObject* obj = NativeObject::create(cx, allocKind, gc::Heap::Default, shape); + if (!obj) { + return nullptr; + } + BoundFunctionObject* bound = &obj->as(); + bound->initFlags(templateObj->numBoundArgs(), templateObj->isConstructor()); + bound->initLength(templateObj->getLengthForInitialShape().toInt32()); + bound->initName(&templateObj->getNameForInitialShape().toString()->asAtom()); + return bound; +} + +// static +BoundFunctionObject* BoundFunctionObject::functionBindSpecializedBaseline( + JSContext* cx, Handle target, Value* args, uint32_t argc, + Handle templateObj) { + // Root the Values on the stack. 
+ RootedExternalValueArray argsRoot(cx, argc, args); + + MOZ_ASSERT(target->is() || target->is()); + MOZ_ASSERT(target->isCallable()); + MOZ_ASSERT(target->isConstructor() == templateObj->isConstructor()); + MOZ_ASSERT(target->staticPrototype() == templateObj->staticPrototype()); + + size_t numBoundArgs = argc > 0 ? argc - 1 : 0; + MOZ_ASSERT(numBoundArgs <= MaxInlineBoundArgs); + + BoundFunctionObject* bound = createWithTemplate(cx, templateObj); + if (!bound) { + return nullptr; + } + + MOZ_ASSERT(bound->lookupPure(cx->names().length)->slot() == LengthSlot); + MOZ_ASSERT(bound->lookupPure(cx->names().name)->slot() == NameSlot); + + bound->initReservedSlot(TargetSlot, ObjectValue(*target)); + if (argc > 0) { + bound->initReservedSlot(BoundThisSlot, args[0]); + } + for (size_t i = 0; i < numBoundArgs; i++) { + bound->initReservedSlot(BoundArg0Slot + i, args[i + 1]); + } + return bound; +} + +// static +BoundFunctionObject* BoundFunctionObject::createTemplateObject(JSContext* cx) { + Rooted proto(cx, &cx->global()->getFunctionPrototype()); + Rooted bound( + cx, NewTenuredObjectWithGivenProto(cx, proto)); + if (!bound) { + return nullptr; + } + if (!SharedShape::ensureInitialCustomShape(cx, bound)) { + return nullptr; + } + return bound; +} + +bool BoundFunctionObject::initTemplateSlotsForSpecializedBind( + JSContext* cx, uint32_t numBoundArgs, bool targetIsConstructor, + uint32_t targetLength, JSAtom* targetName) { + size_t len = 0; + if (targetLength > numBoundArgs) { + len = targetLength - numBoundArgs; + } + + JSAtom* name = AppendBoundFunctionPrefix(cx, targetName); + if (!name) { + return false; + } + + initFlags(numBoundArgs, targetIsConstructor); + initLength(len); + initName(name); + return true; +} + +static const JSClassOps classOps = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + nullptr, // finalize + BoundFunctionObject::call, // call + 
BoundFunctionObject::construct, // construct + nullptr, // trace +}; + +static const ObjectOps objOps = { + nullptr, // lookupProperty + nullptr, // qdefineProperty + nullptr, // hasProperty + nullptr, // getProperty + nullptr, // setProperty + nullptr, // getOwnPropertyDescriptor + nullptr, // deleteProperty + nullptr, // getElements + BoundFunctionObject::funToString, // funToString +}; + +const JSClass BoundFunctionObject::class_ = { + "BoundFunctionObject", + // Note: bound functions don't have their own constructor or prototype (they + // use the prototype of the target object), but we give them a JSProtoKey + // because that's what Xray wrappers use to identify builtin objects. + JSCLASS_HAS_CACHED_PROTO(JSProto_BoundFunction) | + JSCLASS_HAS_RESERVED_SLOTS(BoundFunctionObject::SlotCount), + &classOps, + JS_NULL_CLASS_SPEC, + JS_NULL_CLASS_EXT, + &objOps, +}; diff --git a/js/src/vm/BoundFunctionObject.h b/js/src/vm/BoundFunctionObject.h new file mode 100644 index 0000000000..566bdc0bed --- /dev/null +++ b/js/src/vm/BoundFunctionObject.h @@ -0,0 +1,174 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BoundFunctionObject_h +#define vm_BoundFunctionObject_h + +#include "jstypes.h" + +#include "gc/Policy.h" +#include "vm/ArrayObject.h" +#include "vm/JSAtom.h" +#include "vm/JSObject.h" + +namespace js { + +// Implementation of Bound Function Exotic Objects. +// ES2023 10.4.1 +// https://tc39.es/ecma262/#sec-bound-function-exotic-objects +class BoundFunctionObject : public NativeObject { + public: + static const JSClass class_; + + // FlagsSlot uses the low bit for the is-constructor flag and the other bits + // for the number of arguments. 
+ static constexpr size_t IsConstructorFlag = 0b1; + static constexpr size_t NumBoundArgsShift = 1; + + // The maximum number of bound arguments that can be stored inline in + // BoundArg*Slot. + static constexpr size_t MaxInlineBoundArgs = 3; + + private: + enum { + // The [[BoundTargetFunction]] (a callable object). + TargetSlot, + + // The number of arguments + the is-constructor flag, stored as Int32Value. + FlagsSlot, + + // The [[BoundThis]] Value. + BoundThisSlot, + + // The [[BoundArguments]]. If numBoundArgs exceeds MaxInlineBoundArgs, + // BoundArg0Slot will contain an array object that stores the values and the + // other two slots will be unused. + BoundArg0Slot, + BoundArg1Slot, + BoundArg2Slot, + + // Initial slots for the `length` and `name` own data properties. Note that + // these properties are configurable, so these slots can be mutated when the + // object is exposed to JS. + LengthSlot, + NameSlot, + + SlotCount + }; + + // The AllocKind should match SlotCount. See assertion in functionBindImpl. 
+ static constexpr gc::AllocKind allocKind = gc::AllocKind::OBJECT8_BACKGROUND; + + void initFlags(size_t numBoundArgs, bool isConstructor) { + int32_t val = (numBoundArgs << NumBoundArgsShift) | isConstructor; + initReservedSlot(FlagsSlot, Int32Value(val)); + } + + public: + size_t numBoundArgs() const { + int32_t v = getReservedSlot(FlagsSlot).toInt32(); + MOZ_ASSERT(v >= 0); + return v >> NumBoundArgsShift; + } + bool isConstructor() const { + int32_t v = getReservedSlot(FlagsSlot).toInt32(); + return v & IsConstructorFlag; + } + + Value getTargetVal() const { return getReservedSlot(TargetSlot); } + JSObject* getTarget() const { return &getTargetVal().toObject(); } + + Value getBoundThis() const { return getReservedSlot(BoundThisSlot); } + + Value getInlineBoundArg(size_t i) const { + MOZ_ASSERT(i < numBoundArgs()); + MOZ_ASSERT(numBoundArgs() <= MaxInlineBoundArgs); + return getReservedSlot(BoundArg0Slot + i); + } + ArrayObject* getBoundArgsArray() const { + MOZ_ASSERT(numBoundArgs() > MaxInlineBoundArgs); + return &getReservedSlot(BoundArg0Slot).toObject().as(); + } + Value getBoundArg(size_t i) const { + MOZ_ASSERT(i < numBoundArgs()); + if (numBoundArgs() <= MaxInlineBoundArgs) { + return getInlineBoundArg(i); + } + return getBoundArgsArray()->getDenseElement(i); + } + + void initLength(double len) { + MOZ_ASSERT(getReservedSlot(LengthSlot).isUndefined()); + initReservedSlot(LengthSlot, NumberValue(len)); + } + void initName(JSAtom* name) { + MOZ_ASSERT(getReservedSlot(NameSlot).isUndefined()); + initReservedSlot(NameSlot, StringValue(name)); + } + + // Get the `length` and `name` property values when the object has the + // original shape. See comment for LengthSlot and NameSlot. + Value getLengthForInitialShape() const { return getReservedSlot(LengthSlot); } + Value getNameForInitialShape() const { return getReservedSlot(NameSlot); } + + // The [[Call]] and [[Construct]] hooks. 
+ static bool call(JSContext* cx, unsigned argc, Value* vp); + static bool construct(JSContext* cx, unsigned argc, Value* vp); + + // The JSFunToStringOp implementation for Function.prototype.toString. + static JSString* funToString(JSContext* cx, Handle obj, + bool isToSource); + + // Implementation of Function.prototype.bind. + static bool functionBind(JSContext* cx, unsigned argc, Value* vp); + + static SharedShape* assignInitialShape(JSContext* cx, + Handle obj); + + static BoundFunctionObject* functionBindImpl( + JSContext* cx, Handle target, Value* args, uint32_t argc, + Handle maybeBound); + + static BoundFunctionObject* createWithTemplate( + JSContext* cx, Handle templateObj); + static BoundFunctionObject* functionBindSpecializedBaseline( + JSContext* cx, Handle target, Value* args, uint32_t argc, + Handle templateObj); + + static BoundFunctionObject* createTemplateObject(JSContext* cx); + + bool initTemplateSlotsForSpecializedBind(JSContext* cx, uint32_t numBoundArgs, + bool targetIsConstructor, + uint32_t targetLength, + JSAtom* targetName); + + static constexpr size_t offsetOfTargetSlot() { + return getFixedSlotOffset(TargetSlot); + } + static constexpr size_t offsetOfFlagsSlot() { + return getFixedSlotOffset(FlagsSlot); + } + static constexpr size_t offsetOfBoundThisSlot() { + return getFixedSlotOffset(BoundThisSlot); + } + static constexpr size_t offsetOfFirstInlineBoundArg() { + return getFixedSlotOffset(BoundArg0Slot); + } + static constexpr size_t offsetOfLengthSlot() { + return getFixedSlotOffset(LengthSlot); + } + static constexpr size_t offsetOfNameSlot() { + return getFixedSlotOffset(NameSlot); + } + + static constexpr size_t targetSlot() { return TargetSlot; } + static constexpr size_t boundThisSlot() { return BoundThisSlot; } + static constexpr size_t firstInlineBoundArgSlot() { return BoundArg0Slot; } +}; + +}; // namespace js + +#endif /* vm_BoundFunctionObject_h */ diff --git a/js/src/vm/BuildId.cpp b/js/src/vm/BuildId.cpp new file mode 
100644 index 0000000000..6183a79014 --- /dev/null +++ b/js/src/vm/BuildId.cpp @@ -0,0 +1,27 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* SpiderMonkey buildId-related functionality. */ + +#include "js/BuildId.h" // JS::BuildIdCharVector, JS::BuildIdOp, JS::GetOptimizedEncodingBuildId, JS::SetProcessBuildIdOp + +#include "mozilla/Atomics.h" // mozilla::Atomic + +#include "jstypes.h" // JS_PUBLIC_API + +#include "vm/Runtime.h" // js::GetBuildId +#include "wasm/WasmModule.h" // js::wasm::GetOptimizedEncodingBuildId + +mozilla::Atomic js::GetBuildId; + +JS_PUBLIC_API void JS::SetProcessBuildIdOp(JS::BuildIdOp buildIdOp) { + js::GetBuildId = buildIdOp; +} + +JS_PUBLIC_API bool JS::GetOptimizedEncodingBuildId( + JS::BuildIdCharVector* buildId) { + return js::wasm::GetOptimizedEncodingBuildId(buildId); +} diff --git a/js/src/vm/BuiltinObjectKind.cpp b/js/src/vm/BuiltinObjectKind.cpp new file mode 100644 index 0000000000..dbc6a9ccdc --- /dev/null +++ b/js/src/vm/BuiltinObjectKind.cpp @@ -0,0 +1,205 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/BuiltinObjectKind.h" + +#include "jspubtd.h" + +#include "frontend/ParserAtom.h" +#include "vm/GlobalObject.h" + +using namespace js; + +static JSProtoKey ToProtoKey(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + return JSProto_Array; + case BuiltinObjectKind::ArrayBuffer: + return JSProto_ArrayBuffer; + case BuiltinObjectKind::Int32Array: + return JSProto_Int32Array; + case BuiltinObjectKind::Iterator: + return JSProto_Iterator; + case BuiltinObjectKind::Map: + return JSProto_Map; + case BuiltinObjectKind::Promise: + return JSProto_Promise; + case BuiltinObjectKind::RegExp: + return JSProto_RegExp; + case BuiltinObjectKind::Set: + return JSProto_Set; + case BuiltinObjectKind::SharedArrayBuffer: + return JSProto_SharedArrayBuffer; + case BuiltinObjectKind::Symbol: + return JSProto_Symbol; + + case BuiltinObjectKind::FunctionPrototype: + return JSProto_Function; + case BuiltinObjectKind::ObjectPrototype: + return JSProto_Object; + case BuiltinObjectKind::RegExpPrototype: + return JSProto_RegExp; + case BuiltinObjectKind::StringPrototype: + return JSProto_String; + + case BuiltinObjectKind::DateTimeFormatPrototype: + return JSProto_DateTimeFormat; + case BuiltinObjectKind::NumberFormatPrototype: + return JSProto_NumberFormat; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} + +static bool IsPrototype(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + case BuiltinObjectKind::ArrayBuffer: + case BuiltinObjectKind::Int32Array: + case BuiltinObjectKind::Iterator: + case BuiltinObjectKind::Map: + case BuiltinObjectKind::Promise: + case BuiltinObjectKind::RegExp: + case BuiltinObjectKind::Set: + case BuiltinObjectKind::SharedArrayBuffer: + case BuiltinObjectKind::Symbol: + return false; + + case BuiltinObjectKind::FunctionPrototype: + case BuiltinObjectKind::ObjectPrototype: + case BuiltinObjectKind::RegExpPrototype: + case 
BuiltinObjectKind::StringPrototype: + return true; + + case BuiltinObjectKind::DateTimeFormatPrototype: + case BuiltinObjectKind::NumberFormatPrototype: + return true; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} + +BuiltinObjectKind js::BuiltinConstructorForName( + frontend::TaggedParserAtomIndex name) { + if (name == frontend::TaggedParserAtomIndex::WellKnown::Array()) { + return BuiltinObjectKind::Array; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::ArrayBuffer()) { + return BuiltinObjectKind::ArrayBuffer; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Int32Array()) { + return BuiltinObjectKind::Int32Array; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Iterator()) { + return BuiltinObjectKind::Iterator; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Map()) { + return BuiltinObjectKind::Map; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Promise()) { + return BuiltinObjectKind::Promise; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::RegExp()) { + return BuiltinObjectKind::RegExp; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Set()) { + return BuiltinObjectKind::Set; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::SharedArrayBuffer()) { + return BuiltinObjectKind::SharedArrayBuffer; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Symbol()) { + return BuiltinObjectKind::Symbol; + } + return BuiltinObjectKind::None; +} + +BuiltinObjectKind js::BuiltinPrototypeForName( + frontend::TaggedParserAtomIndex name) { + if (name == frontend::TaggedParserAtomIndex::WellKnown::Function()) { + return BuiltinObjectKind::FunctionPrototype; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::Object()) { + return BuiltinObjectKind::ObjectPrototype; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::RegExp()) { + return BuiltinObjectKind::RegExpPrototype; + } + if (name == 
frontend::TaggedParserAtomIndex::WellKnown::String()) { + return BuiltinObjectKind::StringPrototype; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::DateTimeFormat()) { + return BuiltinObjectKind::DateTimeFormatPrototype; + } + if (name == frontend::TaggedParserAtomIndex::WellKnown::NumberFormat()) { + return BuiltinObjectKind::NumberFormatPrototype; + } + return BuiltinObjectKind::None; +} + +JSObject* js::MaybeGetBuiltinObject(GlobalObject* global, + BuiltinObjectKind kind) { + JSProtoKey key = ToProtoKey(kind); + if (IsPrototype(kind)) { + return global->maybeGetPrototype(key); + } + return global->maybeGetConstructor(key); +} + +JSObject* js::GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind kind) { + JSProtoKey key = ToProtoKey(kind); + if (IsPrototype(kind)) { + return GlobalObject::getOrCreatePrototype(cx, key); + } + return GlobalObject::getOrCreateConstructor(cx, key); +} + +const char* js::BuiltinObjectName(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + return "Array"; + case BuiltinObjectKind::ArrayBuffer: + return "ArrayBuffer"; + case BuiltinObjectKind::Int32Array: + return "Int32Array"; + case BuiltinObjectKind::Iterator: + return "Iterator"; + case BuiltinObjectKind::Map: + return "Map"; + case BuiltinObjectKind::Promise: + return "Promise"; + case BuiltinObjectKind::RegExp: + return "RegExp"; + case BuiltinObjectKind::SharedArrayBuffer: + return "SharedArrayBuffer"; + case BuiltinObjectKind::Set: + return "Set"; + case BuiltinObjectKind::Symbol: + return "Symbol"; + + case BuiltinObjectKind::FunctionPrototype: + return "Function.prototype"; + case BuiltinObjectKind::ObjectPrototype: + return "Object.prototype"; + case BuiltinObjectKind::RegExpPrototype: + return "RegExp.prototype"; + case BuiltinObjectKind::StringPrototype: + return "String.prototype"; + + case BuiltinObjectKind::DateTimeFormatPrototype: + return "DateTimeFormat.prototype"; + case BuiltinObjectKind::NumberFormatPrototype: + 
return "NumberFormat.prototype"; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} diff --git a/js/src/vm/BuiltinObjectKind.h b/js/src/vm/BuiltinObjectKind.h new file mode 100644 index 0000000000..30808ef977 --- /dev/null +++ b/js/src/vm/BuiltinObjectKind.h @@ -0,0 +1,88 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BuiltinObjectKind_h +#define vm_BuiltinObjectKind_h + +#include + +#include "jstypes.h" + +class JS_PUBLIC_API JSAtom; +struct JS_PUBLIC_API JSContext; +class JS_PUBLIC_API JSObject; + +namespace js { + +namespace frontend { +class TaggedParserAtomIndex; +} + +class GlobalObject; + +/** + * Built-in objects used by the GetBuiltinConstructor and GetBuiltinPrototype + * self-hosted intrinsics. + */ +enum class BuiltinObjectKind : uint8_t { + // Built-in constructors. + Array, + ArrayBuffer, + Int32Array, + Iterator, + Map, + Promise, + RegExp, + Set, + SharedArrayBuffer, + Symbol, + + // Built-in prototypes. + FunctionPrototype, + ObjectPrototype, + RegExpPrototype, + StringPrototype, + + // Built-in Intl prototypes. + DateTimeFormatPrototype, + NumberFormatPrototype, + + // Invalid placeholder. + None, +}; + +/** + * Return the BuiltinObjectKind for the given constructor name. Return + * BuiltinObjectKind::None if no matching constructor was found. + */ +BuiltinObjectKind BuiltinConstructorForName( + frontend::TaggedParserAtomIndex name); + +/** + * Return the BuiltinObjectKind for the given prototype name. Return + * BuiltinObjectKind::None if no matching prototype was found. 
+ */ +BuiltinObjectKind BuiltinPrototypeForName(frontend::TaggedParserAtomIndex name); + +/** + * Return the built-in object if already created for the given global. Otherwise + * return nullptr. + */ +JSObject* MaybeGetBuiltinObject(GlobalObject* global, BuiltinObjectKind kind); + +/** + * Return the built-in object for the given global. + */ +JSObject* GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind kind); + +/** + * Return the display name for a built-in object. + */ +const char* BuiltinObjectName(BuiltinObjectKind kind); + +} // namespace js + +#endif /* vm_BuiltinObjectKind_h */ diff --git a/js/src/vm/BytecodeFormatFlags.h b/js/src/vm/BytecodeFormatFlags.h new file mode 100644 index 0000000000..893f0f0823 --- /dev/null +++ b/js/src/vm/BytecodeFormatFlags.h @@ -0,0 +1,61 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeFormatFlags_h +#define vm_BytecodeFormatFlags_h + +/* + * [SMDOC] Bytecode Format flags (JOF_*) + */ +enum { + JOF_BYTE = 0, /* single bytecode, no immediates */ + JOF_UINT8 = 1, /* unspecified uint8_t argument */ + JOF_UINT16 = 2, /* unspecified uint16_t argument */ + JOF_UINT24 = 3, /* unspecified uint24_t argument */ + JOF_UINT32 = 4, /* unspecified uint32_t argument */ + JOF_INT8 = 5, /* int8_t literal */ + JOF_INT32 = 6, /* int32_t literal */ + JOF_JUMP = 7, /* int32_t jump offset */ + JOF_TABLESWITCH = 8, /* table switch */ + JOF_ENVCOORD = 9, /* embedded ScopeCoordinate immediate */ + JOF_ARGC = 10, /* uint16_t argument count */ + JOF_QARG = 11, /* function argument index */ + JOF_LOCAL = 12, /* var or block-local variable */ + JOF_RESUMEINDEX = 13, /* yield or await resume index */ + JOF_DOUBLE = 14, /* inline DoubleValue */ + JOF_GCTHING = 15, /* uint32_t generic gc-thing index */ + JOF_ATOM = 16, /* uint32_t constant index */ + JOF_OBJECT = 17, /* uint32_t object index */ + JOF_REGEXP = 18, /* uint32_t regexp index */ + JOF_SCOPE = 19, /* uint32_t scope index */ + JOF_BIGINT = 20, /* uint32_t index for BigInt value */ + JOF_ICINDEX = 21, /* uint32_t IC index */ + JOF_LOOPHEAD = 22, /* JSOp::LoopHead, combines JOF_ICINDEX and JOF_UINT8 */ + JOF_TWO_UINT8 = 23, /* A pair of unspecified uint8_t arguments */ + JOF_DEBUGCOORD = 24, /* An embedded ScopeCoordinate immediate that may + traverse DebugEnvironmentProxies*/ + JOF_SHAPE = 25, /* uint32_t shape index */ + JOF_STRING = 26, /* uint32_t constant index */ + JOF_TYPEMASK = 0xFF, /* mask for above immediate types */ + + JOF_NAME = 1 << 8, /* name operation */ + JOF_PROP = 2 << 8, /* obj.prop operation */ + JOF_ELEM = 3 << 8, /* obj[index] operation */ + JOF_MODEMASK = 0xFF << 8, /* mask for above addressing modes */ + + JOF_PROPSET = 1 << 16, /* property/element/name set operation */ + JOF_PROPINIT = 1 << 17, /* property/element/name init operation */ + JOF_CHECKSLOPPY = 1 << 18, /* 
op can only be generated in sloppy mode */ + JOF_CHECKSTRICT = 1 << 19, /* op can only be generated in strict mode */ + JOF_INVOKE = 1 << 20, /* any call, construct, or eval instruction */ + JOF_CONSTRUCT = 1 << 21, /* invoke instruction using [[Construct]] entry */ + JOF_SPREAD = 1 << 22, /* invoke instruction using spread argument */ + JOF_GNAME = 1 << 23, /* predicted global name */ + JOF_IC = 1 << 24, /* baseline may use an IC for this op */ + JOF_USES_ENV = 1 << 25, /* op uses the frame's environment chain */ +}; + +#endif /* vm_BytecodeFormatFlags_h */ diff --git a/js/src/vm/BytecodeIterator-inl.h b/js/src/vm/BytecodeIterator-inl.h new file mode 100644 index 0000000000..37e42fc88d --- /dev/null +++ b/js/src/vm/BytecodeIterator-inl.h @@ -0,0 +1,40 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeIterator_inl_h +#define vm_BytecodeIterator_inl_h + +#include "vm/BytecodeIterator.h" + +#include "vm/JSScript.h" + +namespace js { + +inline BytecodeIterator::BytecodeIterator(const JSScript* script) + : current_(script, script->code()) {} + +// AllBytecodesIterable + +inline BytecodeIterator AllBytecodesIterable::begin() { + return BytecodeIterator(script_); +} + +inline BytecodeIterator AllBytecodesIterable::end() { + return BytecodeIterator(BytecodeLocation(script_, script_->codeEnd())); +} + +// BytecodeLocationRange + +inline BytecodeIterator BytecodeLocationRange::begin() { + return BytecodeIterator(beginLoc_); +} + +inline BytecodeIterator BytecodeLocationRange::end() { + return BytecodeIterator(endLoc_); +} + +} // namespace js +#endif diff --git a/js/src/vm/BytecodeIterator.h b/js/src/vm/BytecodeIterator.h new file mode 100644 index 0000000000..afc84e0451 --- /dev/null +++ b/js/src/vm/BytecodeIterator.h @@ -0,0 +1,85 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeIterator_h +#define vm_BytecodeIterator_h + +#include "vm/BytecodeLocation.h" + +namespace js { + +class BytecodeIterator { + BytecodeLocation current_; + + public: + inline explicit BytecodeIterator(const JSScript* script); + + explicit BytecodeIterator(BytecodeLocation loc) : current_(loc) {} + + BytecodeIterator& operator=(const BytecodeIterator&) = default; + + bool operator==(const BytecodeIterator& other) const { + return other.current_ == current_; + } + + bool operator!=(const BytecodeIterator& other) const { + return !(other.current_ == current_); + } + + const BytecodeLocation& operator*() const { return current_; } + + const BytecodeLocation* operator->() const { return ¤t_; } + + // Pre-increment + BytecodeIterator& operator++() { + current_ = current_.next(); + return *this; + } + + // Post-increment + BytecodeIterator operator++(int) { + BytecodeIterator previous(*this); + current_ = current_.next(); + return previous; + } +}; + +// Given a JSScript, allow the construction of a range based for-loop +// that will visit all script locations in that script. +class AllBytecodesIterable { + const JSScript* script_; + + public: + explicit AllBytecodesIterable(const JSScript* script) : script_(script) {} + + BytecodeIterator begin(); + BytecodeIterator end(); +}; + +// Construct a range based iterator that will visit all bytecode locations +// between two given bytecode locations. +// `beginLoc_` is the bytecode location where the iterator will start, and +// `endLoc_` is the bytecode location where the iterator will end. 
+class BytecodeLocationRange { + BytecodeLocation beginLoc_; + BytecodeLocation endLoc_; + + public: + explicit BytecodeLocationRange(BytecodeLocation beginLoc, + BytecodeLocation endLoc) + : beginLoc_(beginLoc), endLoc_(endLoc) { +#ifdef DEBUG + MOZ_ASSERT(beginLoc.hasSameScript(endLoc)); +#endif + } + + BytecodeIterator begin(); + BytecodeIterator end(); +}; + +} // namespace js + +#endif diff --git a/js/src/vm/BytecodeLocation-inl.h b/js/src/vm/BytecodeLocation-inl.h new file mode 100644 index 0000000000..46c945ddad --- /dev/null +++ b/js/src/vm/BytecodeLocation-inl.h @@ -0,0 +1,115 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeLocation_inl_h +#define vm_BytecodeLocation_inl_h + +#include "vm/BytecodeLocation.h" + +#include "vm/JSScript.h" + +#include "vm/BytecodeUtil-inl.h" +#include "vm/JSScript-inl.h" + +namespace js { + +inline uint32_t BytecodeLocation::bytecodeToOffset( + const JSScript* script) const { + MOZ_ASSERT(this->isInBounds()); + return script->pcToOffset(this->rawBytecode_); +} + +inline JSAtom* BytecodeLocation::getAtom(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getAtom(this->rawBytecode_); +} + +inline JSString* BytecodeLocation::getString(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getString(this->rawBytecode_); +} + +inline PropertyName* BytecodeLocation::getPropertyName( + const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getName(this->rawBytecode_); +} + +inline JS::BigInt* BytecodeLocation::getBigInt(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::BigInt)); + return script->getBigInt(this->rawBytecode_); 
+} + +inline JSObject* BytecodeLocation::getObject(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::CallSiteObj) || is(JSOp::Object)); + return script->getObject(this->rawBytecode_); +} + +inline JSFunction* BytecodeLocation::getFunction(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::Lambda) || is(JSOp::FunWithProto)); + return script->getFunction(this->rawBytecode_); +} + +inline js::RegExpObject* BytecodeLocation::getRegExp( + const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::RegExp)); + return script->getRegExp(this->rawBytecode_); +} + +inline js::Scope* BytecodeLocation::getScope(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getScope(this->rawBytecode_); +} + +inline Scope* BytecodeLocation::innermostScope(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->innermostScope(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::tableSwitchCaseOffset( + const JSScript* script, uint32_t caseIndex) const { + return script->tableSwitchCaseOffset(this->rawBytecode_, caseIndex); +} + +inline uint32_t BytecodeLocation::getJumpTargetOffset( + const JSScript* script) const { + MOZ_ASSERT(this->isJump()); + return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::getTableSwitchDefaultOffset( + const JSScript* script) const { + MOZ_ASSERT(this->is(JSOp::TableSwitch)); + return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_); +} + +BytecodeLocation BytecodeLocation::getTableSwitchDefaultTarget() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return BytecodeLocation(*this, rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_)); +} + +BytecodeLocation BytecodeLocation::getTableSwitchCaseTarget( + const JSScript* script, uint32_t caseIndex) const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + jsbytecode* casePC = 
script->tableSwitchCasePC(rawBytecode_, caseIndex); + return BytecodeLocation(*this, casePC); +} + +inline uint32_t BytecodeLocation::useCount() const { + return GetUseCount(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::defCount() const { + return GetDefCount(this->rawBytecode_); +} + +} // namespace js + +#endif diff --git a/js/src/vm/BytecodeLocation.cpp b/js/src/vm/BytecodeLocation.cpp new file mode 100644 index 0000000000..fae05a9275 --- /dev/null +++ b/js/src/vm/BytecodeLocation.cpp @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/BytecodeLocation-inl.h" + +#include "vm/JSScript.h" + +using namespace js; + +#ifdef DEBUG +bool BytecodeLocation::isValid(const JSScript* script) const { + // Note: Don't create a new BytecodeLocation during the implementation of + // this, as it is used in the constructor, and will recurse forever. + return script->contains(*this) || toRawBytecode() == script->codeEnd(); +} + +bool BytecodeLocation::isInBounds(const JSScript* script) const { + return script->contains(*this); +} + +const JSScript* BytecodeLocation::getDebugOnlyScript() const { + return this->debugOnlyScript_; +} + +#endif // DEBUG diff --git a/js/src/vm/BytecodeLocation.h b/js/src/vm/BytecodeLocation.h new file mode 100644 index 0000000000..e5876ed9d2 --- /dev/null +++ b/js/src/vm/BytecodeLocation.h @@ -0,0 +1,354 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeLocation_h +#define vm_BytecodeLocation_h + +#include "frontend/NameAnalysisTypes.h" +#include "js/TypeDecls.h" +#include "vm/AsyncFunctionResolveKind.h" +#include "vm/BuiltinObjectKind.h" +#include "vm/BytecodeUtil.h" +#include "vm/CheckIsObjectKind.h" // CheckIsObjectKind +#include "vm/CompletionKind.h" // CompletionKind +#include "vm/FunctionPrefixKind.h" // FunctionPrefixKind +#include "vm/GeneratorResumeKind.h" + +namespace js { + +using RawBytecodeLocationOffset = uint32_t; + +class PropertyName; +class RegExpObject; + +class BytecodeLocationOffset { + RawBytecodeLocationOffset rawOffset_; + + public: + explicit BytecodeLocationOffset(RawBytecodeLocationOffset offset) + : rawOffset_(offset) {} + + RawBytecodeLocationOffset rawOffset() const { return rawOffset_; } +}; + +using RawBytecode = jsbytecode*; + +// A immutable representation of a program location +// +class BytecodeLocation { + RawBytecode rawBytecode_; +#ifdef DEBUG + const JSScript* debugOnlyScript_; +#endif + + // Construct a new BytecodeLocation, while borrowing scriptIdentity + // from some other BytecodeLocation. + BytecodeLocation(const BytecodeLocation& loc, RawBytecode pc) + : rawBytecode_(pc) +#ifdef DEBUG + , + debugOnlyScript_(loc.debugOnlyScript_) +#endif + { + MOZ_ASSERT(isValid()); + } + + public: + // Disallow the creation of an uninitialized location. + BytecodeLocation() = delete; + + BytecodeLocation(const JSScript* script, RawBytecode pc) + : rawBytecode_(pc) +#ifdef DEBUG + , + debugOnlyScript_(script) +#endif + { + MOZ_ASSERT(isValid()); + } + + RawBytecode toRawBytecode() const { return rawBytecode_; } + +#ifdef DEBUG + // Return true if this bytecode location is valid for the given script. + // This includes the location 1-past the end of the bytecode. + bool isValid(const JSScript* script) const; + + // Return true if this bytecode location is within the bounds of the + // bytecode for a given script. 
+ bool isInBounds(const JSScript* script) const; + + const JSScript* getDebugOnlyScript() const; +#endif + + inline uint32_t bytecodeToOffset(const JSScript* script) const; + + inline uint32_t tableSwitchCaseOffset(const JSScript* script, + uint32_t caseIndex) const; + + inline uint32_t getJumpTargetOffset(const JSScript* script) const; + + inline uint32_t getTableSwitchDefaultOffset(const JSScript* script) const; + + inline BytecodeLocation getTableSwitchDefaultTarget() const; + inline BytecodeLocation getTableSwitchCaseTarget(const JSScript* script, + uint32_t caseIndex) const; + + inline uint32_t useCount() const; + inline uint32_t defCount() const; + + int32_t jumpOffset() const { return GET_JUMP_OFFSET(rawBytecode_); } + + inline JSAtom* getAtom(const JSScript* script) const; + inline JSString* getString(const JSScript* script) const; + inline PropertyName* getPropertyName(const JSScript* script) const; + inline JS::BigInt* getBigInt(const JSScript* script) const; + inline JSObject* getObject(const JSScript* script) const; + inline JSFunction* getFunction(const JSScript* script) const; + inline js::RegExpObject* getRegExp(const JSScript* script) const; + inline js::Scope* getScope(const JSScript* script) const; + + uint32_t getSymbolIndex() const { + MOZ_ASSERT(is(JSOp::Symbol)); + return GET_UINT8(rawBytecode_); + } + + inline Scope* innermostScope(const JSScript* script) const; + +#ifdef DEBUG + bool hasSameScript(const BytecodeLocation& other) const { + return debugOnlyScript_ == other.debugOnlyScript_; + } +#endif + + // Overloaded operators + + bool operator==(const BytecodeLocation& other) const { + MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_); + return rawBytecode_ == other.rawBytecode_; + } + + bool operator!=(const BytecodeLocation& other) const { + return !(other == *this); + } + + bool operator<(const BytecodeLocation& other) const { + MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_); + return rawBytecode_ < 
other.rawBytecode_; + } + + // It is traditional to represent the rest of the relational operators + // using operator<, so we don't need to assert for these. + bool operator>(const BytecodeLocation& other) const { return other < *this; } + + bool operator<=(const BytecodeLocation& other) const { + return !(other < *this); + } + + bool operator>=(const BytecodeLocation& other) const { + return !(*this < other); + } + + // Return the next bytecode + BytecodeLocation next() const { + return BytecodeLocation(*this, + rawBytecode_ + GetBytecodeLength(rawBytecode_)); + } + + // Add an offset. + BytecodeLocation operator+(const BytecodeLocationOffset& offset) const { + return BytecodeLocation(*this, rawBytecode_ + offset.rawOffset()); + } + + // Identity Checks + bool is(JSOp op) const { + MOZ_ASSERT(isInBounds()); + return getOp() == op; + } + + // Accessors: + + uint32_t length() const { return GetBytecodeLength(rawBytecode_); } + + bool isJumpTarget() const { return BytecodeIsJumpTarget(getOp()); } + + bool isJump() const { return IsJumpOpcode(getOp()); } + + bool isBackedge() const { return IsBackedgePC(rawBytecode_); } + + bool isBackedgeForLoophead(BytecodeLocation loopHead) const { + return IsBackedgeForLoopHead(rawBytecode_, loopHead.rawBytecode_); + } + + bool opHasIC() const { return BytecodeOpHasIC(getOp()); } + + bool fallsThrough() const { return BytecodeFallsThrough(getOp()); } + + uint32_t icIndex() const { return GET_ICINDEX(rawBytecode_); } + + uint32_t local() const { return GET_LOCALNO(rawBytecode_); } + + uint16_t arg() const { return GET_ARGNO(rawBytecode_); } + + bool isEqualityOp() const { return IsEqualityOp(getOp()); } + + bool isStrictEqualityOp() const { return IsStrictEqualityOp(getOp()); } + + bool isStrictSetOp() const { return IsStrictSetPC(rawBytecode_); } + + bool isNameOp() const { return IsNameOp(getOp()); } + + bool isSpreadOp() const { return IsSpreadOp(getOp()); } + + bool isInvokeOp() const { return IsInvokeOp(getOp()); } + + bool 
isGetPropOp() const { return IsGetPropOp(getOp()); } + bool isGetElemOp() const { return IsGetElemOp(getOp()); } + + bool isSetPropOp() const { return IsSetPropOp(getOp()); } + bool isSetElemOp() const { return IsSetElemOp(getOp()); } + + AsyncFunctionResolveKind getAsyncFunctionResolveKind() { + return AsyncFunctionResolveKind(GET_UINT8(rawBytecode_)); + } + + bool resultIsPopped() const { + MOZ_ASSERT(StackDefs(getOp()) == 1); + return BytecodeIsPopped(rawBytecode_); + } + + // Accessors: + JSOp getOp() const { return JSOp(*rawBytecode_); } + + BytecodeLocation getJumpTarget() const { + MOZ_ASSERT(isJump()); + return BytecodeLocation(*this, + rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_)); + } + + // Return the 'low' parameter to the tableswitch opcode + int32_t getTableSwitchLow() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return GET_JUMP_OFFSET(rawBytecode_ + JUMP_OFFSET_LEN); + } + + // Return the 'high' parameter to the tableswitch opcode + int32_t getTableSwitchHigh() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return GET_JUMP_OFFSET(rawBytecode_ + (2 * JUMP_OFFSET_LEN)); + } + + uint32_t getPopCount() const { + MOZ_ASSERT(is(JSOp::PopN)); + return GET_UINT16(rawBytecode_); + } + + uint32_t getDupAtIndex() const { + MOZ_ASSERT(is(JSOp::DupAt)); + return GET_UINT24(rawBytecode_); + } + + uint8_t getPickDepth() const { + MOZ_ASSERT(is(JSOp::Pick)); + return GET_UINT8(rawBytecode_); + } + uint8_t getUnpickDepth() const { + MOZ_ASSERT(is(JSOp::Unpick)); + return GET_UINT8(rawBytecode_); + } + + uint32_t getEnvCalleeNumHops() const { + MOZ_ASSERT(is(JSOp::EnvCallee)); + return GET_UINT8(rawBytecode_); + } + + EnvironmentCoordinate getEnvironmentCoordinate() const { + MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ENVCOORD); + return EnvironmentCoordinate(rawBytecode_); + } + + uint32_t getCallArgc() const { + MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ARGC); + return GET_ARGC(rawBytecode_); + } + + uint32_t getInitElemArrayIndex() const { + 
MOZ_ASSERT(is(JSOp::InitElemArray)); + uint32_t index = GET_UINT32(rawBytecode_); + MOZ_ASSERT(index <= INT32_MAX, + "the bytecode emitter must never generate JSOp::InitElemArray " + "with an index exceeding int32_t range"); + return index; + } + + FunctionPrefixKind getFunctionPrefixKind() const { + MOZ_ASSERT(is(JSOp::SetFunName)); + return FunctionPrefixKind(GET_UINT8(rawBytecode_)); + } + + CheckIsObjectKind getCheckIsObjectKind() const { + MOZ_ASSERT(is(JSOp::CheckIsObj)); + return CheckIsObjectKind(GET_UINT8(rawBytecode_)); + } + + BuiltinObjectKind getBuiltinObjectKind() const { + MOZ_ASSERT(is(JSOp::BuiltinObject)); + return BuiltinObjectKind(GET_UINT8(rawBytecode_)); + } + + CompletionKind getCompletionKind() const { + MOZ_ASSERT(is(JSOp::CloseIter)); + return CompletionKind(GET_UINT8(rawBytecode_)); + } + + uint32_t getNewArrayLength() const { + MOZ_ASSERT(is(JSOp::NewArray)); + return GET_UINT32(rawBytecode_); + } + + int8_t getInt8() const { + MOZ_ASSERT(is(JSOp::Int8)); + return GET_INT8(rawBytecode_); + } + uint16_t getUint16() const { + MOZ_ASSERT(is(JSOp::Uint16)); + return GET_UINT16(rawBytecode_); + } + uint32_t getUint24() const { + MOZ_ASSERT(is(JSOp::Uint24)); + return GET_UINT24(rawBytecode_); + } + int32_t getInt32() const { + MOZ_ASSERT(is(JSOp::Int32)); + return GET_INT32(rawBytecode_); + } + uint32_t getResumeIndex() const { + MOZ_ASSERT(is(JSOp::InitialYield) || is(JSOp::Yield) || is(JSOp::Await)); + return GET_RESUMEINDEX(rawBytecode_); + } + Value getInlineValue() const { + MOZ_ASSERT(is(JSOp::Double)); + return GET_INLINE_VALUE(rawBytecode_); + } + + GeneratorResumeKind resumeKind() { return ResumeKindFromPC(rawBytecode_); } + + ThrowMsgKind throwMsgKind() { + MOZ_ASSERT(is(JSOp::ThrowMsg)); + return static_cast(GET_UINT8(rawBytecode_)); + } + +#ifdef DEBUG + // To ease writing assertions + bool isValid() const { return isValid(debugOnlyScript_); } + + bool isInBounds() const { return isInBounds(debugOnlyScript_); } +#endif +}; + +} // 
namespace js + +#endif diff --git a/js/src/vm/BytecodeUtil-inl.h b/js/src/vm/BytecodeUtil-inl.h new file mode 100644 index 0000000000..f7b944b0dd --- /dev/null +++ b/js/src/vm/BytecodeUtil-inl.h @@ -0,0 +1,242 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeUtil_inl_h +#define vm_BytecodeUtil_inl_h + +#include "vm/BytecodeUtil.h" + +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "vm/JSScript.h" + +namespace js { + +static inline unsigned GetDefCount(jsbytecode* pc) { + /* + * Add an extra pushed value for Or/And opcodes, so that they are included + * in the pushed array of stack values for type inference. + */ + JSOp op = JSOp(*pc); + switch (op) { + case JSOp::Or: + case JSOp::And: + case JSOp::Coalesce: + return 1; + case JSOp::Pick: + case JSOp::Unpick: + /* + * Pick pops and pushes how deep it looks in the stack + 1 + * items. i.e. if the stack were |a b[2] c[1] d[0]|, pick 2 + * would pop b, c, and d to rearrange the stack to |a c[0] + * d[1] b[2]|. 
+ */ + return pc[1] + 1; + default: + return StackDefs(op); + } +} + +static inline unsigned GetUseCount(jsbytecode* pc) { + JSOp op = JSOp(*pc); + if (op == JSOp::Pick || op == JSOp::Unpick) { + return pc[1] + 1; + } + + return StackUses(op, pc); +} + +static inline JSOp ReverseCompareOp(JSOp op) { + switch (op) { + case JSOp::Gt: + return JSOp::Lt; + case JSOp::Ge: + return JSOp::Le; + case JSOp::Lt: + return JSOp::Gt; + case JSOp::Le: + return JSOp::Ge; + case JSOp::Eq: + case JSOp::Ne: + case JSOp::StrictEq: + case JSOp::StrictNe: + return op; + default: + MOZ_CRASH("unrecognized op"); + } +} + +static inline JSOp NegateCompareOp(JSOp op) { + switch (op) { + case JSOp::Gt: + return JSOp::Le; + case JSOp::Ge: + return JSOp::Lt; + case JSOp::Lt: + return JSOp::Ge; + case JSOp::Le: + return JSOp::Gt; + case JSOp::Eq: + return JSOp::Ne; + case JSOp::Ne: + return JSOp::Eq; + case JSOp::StrictNe: + return JSOp::StrictEq; + case JSOp::StrictEq: + return JSOp::StrictNe; + default: + MOZ_CRASH("unrecognized op"); + } +} + +class BytecodeRange { + public: + BytecodeRange(JSContext* cx, JSScript* script) + : script(cx, script), pc(script->code()), end(pc + script->length()) {} + bool empty() const { return pc == end; } + jsbytecode* frontPC() const { return pc; } + JSOp frontOpcode() const { return JSOp(*pc); } + size_t frontOffset() const { return script->pcToOffset(pc); } + void popFront() { pc += GetBytecodeLength(pc); } + + private: + RootedScript script; + jsbytecode* pc; + jsbytecode* end; +}; + +class BytecodeRangeWithPosition : private BytecodeRange { + public: + using BytecodeRange::empty; + using BytecodeRange::frontOffset; + using BytecodeRange::frontOpcode; + using BytecodeRange::frontPC; + + BytecodeRangeWithPosition(JSContext* cx, JSScript* script) + : BytecodeRange(cx, script), + initialLine(script->lineno()), + lineno(script->lineno()), + column(script->column()), + sn(script->notes()), + snpc(script->code()), + isEntryPoint(false), + isBreakpoint(false), 
+ seenStepSeparator(false), + wasArtifactEntryPoint(false) { + if (!sn->isTerminator()) { + snpc += sn->delta(); + } + updatePosition(); + while (frontPC() != script->main()) { + popFront(); + } + + if (frontOpcode() != JSOp::JumpTarget) { + isEntryPoint = true; + } else { + wasArtifactEntryPoint = true; + } + } + + void popFront() { + BytecodeRange::popFront(); + if (empty()) { + isEntryPoint = false; + } else { + updatePosition(); + } + + // The following conditions are handling artifacts introduced by the + // bytecode emitter, such that we do not add breakpoints on empty + // statements of the source code of the user. + if (wasArtifactEntryPoint) { + wasArtifactEntryPoint = false; + isEntryPoint = true; + } + + if (isEntryPoint && frontOpcode() == JSOp::JumpTarget) { + wasArtifactEntryPoint = isEntryPoint; + isEntryPoint = false; + } + } + + size_t frontLineNumber() const { return lineno; } + size_t frontColumnNumber() const { return column; } + + // Entry points are restricted to bytecode offsets that have an + // explicit mention in the line table. This restriction avoids a + // number of failing cases caused by some instructions not having + // sensible (to the user) line numbers, and it is one way to + // implement the idea that the bytecode emitter should tell the + // debugger exactly which offsets represent "interesting" (to the + // user) places to stop. + bool frontIsEntryPoint() const { return isEntryPoint; } + + // Breakable points are explicitly marked by the emitter as locations where + // the debugger may want to allow users to pause. + bool frontIsBreakablePoint() const { return isBreakpoint; } + + // Breakable step points are the first breakable point after a + // SrcNote::StepSep note has been encountered. 
+ bool frontIsBreakableStepPoint() const { + return isBreakpoint && seenStepSeparator; + } + + private: + void updatePosition() { + if (isBreakpoint) { + isBreakpoint = false; + seenStepSeparator = false; + } + + // Determine the current line number by reading all source notes up to + // and including the current offset. + jsbytecode* lastLinePC = nullptr; + SrcNoteIterator iter(sn); + for (; !iter.atEnd() && snpc <= frontPC(); + ++iter, snpc += (*iter)->delta()) { + auto sn = *iter; + + SrcNoteType type = sn->type(); + if (type == SrcNoteType::ColSpan) { + ptrdiff_t colspan = SrcNote::ColSpan::getSpan(sn); + MOZ_ASSERT(ptrdiff_t(column) + colspan >= 0); + column += colspan; + lastLinePC = snpc; + } else if (type == SrcNoteType::SetLine) { + lineno = SrcNote::SetLine::getLine(sn, initialLine); + column = 0; + lastLinePC = snpc; + } else if (type == SrcNoteType::NewLine) { + lineno++; + column = 0; + lastLinePC = snpc; + } else if (type == SrcNoteType::Breakpoint) { + isBreakpoint = true; + lastLinePC = snpc; + } else if (type == SrcNoteType::StepSep) { + seenStepSeparator = true; + lastLinePC = snpc; + } + } + + sn = *iter; + isEntryPoint = lastLinePC == frontPC(); + } + + size_t initialLine; + size_t lineno; + size_t column; + const SrcNote* sn; + jsbytecode* snpc; + bool isEntryPoint; + bool isBreakpoint; + bool seenStepSeparator; + bool wasArtifactEntryPoint; +}; + +} // namespace js + +#endif /* vm_BytecodeUtil_inl_h */ diff --git a/js/src/vm/BytecodeUtil.cpp b/js/src/vm/BytecodeUtil.cpp new file mode 100644 index 0000000000..2d73cf5340 --- /dev/null +++ b/js/src/vm/BytecodeUtil.cpp @@ -0,0 +1,3110 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +/* + * JS bytecode descriptors, disassemblers, and (expression) decompilers. + */ + +#include "vm/BytecodeUtil-inl.h" + +#define __STDC_FORMAT_MACROS + +#include "mozilla/Maybe.h" +#include "mozilla/ReverseIterator.h" +#include "mozilla/Sprintf.h" + +#include +#include +#include + +#include "jsapi.h" +#include "jstypes.h" + +#include "frontend/BytecodeCompiler.h" +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "gc/PublicIterators.h" +#include "jit/IonScript.h" // IonBlockCounts +#include "js/CharacterEncoding.h" +#include "js/experimental/CodeCoverage.h" +#include "js/experimental/PCCountProfiling.h" // JS::{Start,Stop}PCCountProfiling, JS::PurgePCCounts, JS::GetPCCountScript{Count,Summary,Contents} +#include "js/friend/DumpFunctions.h" // js::DumpPC, js::DumpScript +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/Printer.h" +#include "js/Printf.h" +#include "js/Symbol.h" +#include "util/DifferentialTesting.h" +#include "util/Memory.h" +#include "util/Text.h" +#include "vm/BuiltinObjectKind.h" +#include "vm/BytecodeIterator.h" // for AllBytecodesIterable +#include "vm/BytecodeLocation.h" +#include "vm/CodeCoverage.h" +#include "vm/EnvironmentObject.h" +#include "vm/FrameIter.h" // js::{,Script}FrameIter +#include "vm/JSAtom.h" +#include "vm/JSContext.h" +#include "vm/JSFunction.h" +#include "vm/JSObject.h" +#include "vm/JSONPrinter.h" +#include "vm/JSScript.h" +#include "vm/Opcodes.h" +#include "vm/Realm.h" +#include "vm/Shape.h" +#include "vm/ToSource.h" // js::ValueToSource +#include "vm/WellKnownAtom.h" // js_*_str + +#include "gc/GC-inl.h" +#include "vm/BytecodeIterator-inl.h" +#include "vm/JSContext-inl.h" +#include "vm/JSScript-inl.h" +#include "vm/Realm-inl.h" + +using namespace js; + +using js::frontend::IsIdentifier; + +/* + * Index limit must stay within 32 bits. 
+ */ +static_assert(sizeof(uint32_t) * CHAR_BIT >= INDEX_LIMIT_LOG2 + 1); + +const JSCodeSpec js::CodeSpecTable[] = { +#define MAKE_CODESPEC(op, op_snake, token, length, nuses, ndefs, format) \ + {length, nuses, ndefs, format}, + FOR_EACH_OPCODE(MAKE_CODESPEC) +#undef MAKE_CODESPEC +}; + +/* + * Each element of the array is either a source literal associated with JS + * bytecode or null. + */ +static const char* const CodeToken[] = { +#define TOKEN(op, op_snake, token, ...) token, + FOR_EACH_OPCODE(TOKEN) +#undef TOKEN +}; + +/* + * Array of JS bytecode names used by PC count JSON, DEBUG-only Disassemble + * and JIT debug spew. + */ +const char* const js::CodeNameTable[] = { +#define OPNAME(op, ...) #op, + FOR_EACH_OPCODE(OPNAME) +#undef OPNAME +}; + +/************************************************************************/ + +static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex, + UniqueChars* res); + +/* static */ const char PCCounts::numExecName[] = "interp"; + +[[nodiscard]] static bool DumpIonScriptCounts(Sprinter* sp, HandleScript script, + jit::IonScriptCounts* ionCounts) { + if (!sp->jsprintf("IonScript [%zu blocks]:\n", ionCounts->numBlocks())) { + return false; + } + + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + const jit::IonBlockCounts& block = ionCounts->block(i); + unsigned lineNumber = 0, columnNumber = 0; + lineNumber = PCToLineNumber(script, script->offsetToPC(block.offset()), + &columnNumber); + if (!sp->jsprintf("BB #%" PRIu32 " [%05u,%u,%u]", block.id(), + block.offset(), lineNumber, columnNumber)) { + return false; + } + if (block.description()) { + if (!sp->jsprintf(" [inlined %s]", block.description())) { + return false; + } + } + for (size_t j = 0; j < block.numSuccessors(); j++) { + if (!sp->jsprintf(" -> #%" PRIu32, block.successor(j))) { + return false; + } + } + if (!sp->jsprintf(" :: %" PRIu64 " hits\n", block.hitCount())) { + return false; + } + if (!sp->jsprintf("%s\n", block.code())) { + return false; + 
} + } + + return true; +} + +[[nodiscard]] static bool DumpPCCounts(JSContext* cx, HandleScript script, + Sprinter* sp) { + MOZ_ASSERT(script->hasScriptCounts()); + + // Ensure the Disassemble1 call below does not discard the script counts. + gc::AutoSuppressGC suppress(cx); + +#ifdef DEBUG + jsbytecode* pc = script->code(); + while (pc < script->codeEnd()) { + jsbytecode* next = GetNextPc(pc); + + if (!Disassemble1(cx, script, pc, script->pcToOffset(pc), true, sp)) { + return false; + } + + if (!sp->put(" {")) { + return false; + } + + PCCounts* counts = script->maybeGetPCCounts(pc); + if (double val = counts ? counts->numExec() : 0.0) { + if (!sp->jsprintf("\"%s\": %.0f", PCCounts::numExecName, val)) { + return false; + } + } + if (!sp->put("}\n")) { + return false; + } + + pc = next; + } +#endif + + jit::IonScriptCounts* ionCounts = script->getIonCounts(); + while (ionCounts) { + if (!DumpIonScriptCounts(sp, script, ionCounts)) { + return false; + } + + ionCounts = ionCounts->previous(); + } + + return true; +} + +bool js::DumpRealmPCCounts(JSContext* cx) { + Rooted> scripts(cx, GCVector(cx)); + for (auto base = cx->zone()->cellIter(); !base.done(); + base.next()) { + if (base->realm() != cx->realm()) { + continue; + } + MOZ_ASSERT_IF(base->hasScriptCounts(), base->hasBytecode()); + if (base->hasScriptCounts()) { + if (!scripts.append(base->asJSScript())) { + return false; + } + } + } + + for (uint32_t i = 0; i < scripts.length(); i++) { + HandleScript script = scripts[i]; + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + + const char* filename = script->filename(); + if (!filename) { + filename = "(unknown)"; + } + fprintf(stdout, "--- SCRIPT %s:%u ---\n", filename, script->lineno()); + if (!DumpPCCounts(cx, script, &sprinter)) { + return false; + } + fputs(sprinter.string(), stdout); + fprintf(stdout, "--- END SCRIPT %s:%u ---\n", filename, script->lineno()); + } + + return true; +} + 
+///////////////////////////////////////////////////////////////////// +// Bytecode Parser +///////////////////////////////////////////////////////////////////// + +// Stores the information about the stack slot, where the value comes from. +// Elements of BytecodeParser::Bytecode.{offsetStack,offsetStackAfter} arrays. +class OffsetAndDefIndex { + // The offset of the PC that pushed the value for this slot. + uint32_t offset_; + + // The index in `ndefs` for the PC (0-origin) + uint8_t defIndex_; + + enum : uint8_t { + Normal = 0, + + // Ignored this value in the expression decompilation. + // Used by JSOp::NopDestructuring. See BytecodeParser::simulateOp. + Ignored, + + // The value in this slot comes from 2 or more paths. + // offset_ and defIndex_ holds the information for the path that + // reaches here first. + Merged, + } type_; + + public: + uint32_t offset() const { + MOZ_ASSERT(!isSpecial()); + return offset_; + }; + uint32_t specialOffset() const { + MOZ_ASSERT(isSpecial()); + return offset_; + }; + + uint8_t defIndex() const { + MOZ_ASSERT(!isSpecial()); + return defIndex_; + } + uint8_t specialDefIndex() const { + MOZ_ASSERT(isSpecial()); + return defIndex_; + } + + bool isSpecial() const { return type_ != Normal; } + bool isMerged() const { return type_ == Merged; } + bool isIgnored() const { return type_ == Ignored; } + + void set(uint32_t aOffset, uint8_t aDefIndex) { + offset_ = aOffset; + defIndex_ = aDefIndex; + type_ = Normal; + } + + // Keep offset_ and defIndex_ values for stack dump. 
+ void setMerged() { type_ = Merged; } + void setIgnored() { type_ = Ignored; } + + bool operator==(const OffsetAndDefIndex& rhs) const { + return offset_ == rhs.offset_ && defIndex_ == rhs.defIndex_; + } + + bool operator!=(const OffsetAndDefIndex& rhs) const { + return !(*this == rhs); + } +}; + +namespace { + +class BytecodeParser { + public: + enum class JumpKind { + Simple, + SwitchCase, + SwitchDefault, + TryCatch, + TryFinally + }; + + private: + class Bytecode { + public: + explicit Bytecode(const LifoAllocPolicy& alloc) + : parsed(false), + stackDepth(0), + offsetStack(nullptr) +#if defined(DEBUG) || defined(JS_JITSPEW) + , + stackDepthAfter(0), + offsetStackAfter(nullptr), + jumpOrigins(alloc) +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + { + } + + // Whether this instruction has been analyzed to get its output defines + // and stack. + bool parsed; + + // Stack depth before this opcode. + uint32_t stackDepth; + + // Pointer to array of |stackDepth| offsets. An element at position N + // in the array is the offset of the opcode that defined the + // corresponding stack slot. The top of the stack is at position + // |stackDepth - 1|. + OffsetAndDefIndex* offsetStack; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // stack depth after this opcode. + uint32_t stackDepthAfter; + + // Pointer to array of |stackDepthAfter| offsets. + OffsetAndDefIndex* offsetStackAfter; + + struct JumpInfo { + uint32_t from; + JumpKind kind; + + JumpInfo(uint32_t from_, JumpKind kind_) : from(from_), kind(kind_) {} + }; + + // A list of offsets of the bytecode that jumps to this bytecode, + // exclusing previous bytecode. 
+ Vector> jumpOrigins; +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + bool captureOffsetStack(LifoAlloc& alloc, const OffsetAndDefIndex* stack, + uint32_t depth) { + stackDepth = depth; + if (stackDepth) { + offsetStack = alloc.newArray(stackDepth); + if (!offsetStack) { + return false; + } + for (uint32_t n = 0; n < stackDepth; n++) { + offsetStack[n] = stack[n]; + } + } + return true; + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + bool captureOffsetStackAfter(LifoAlloc& alloc, + const OffsetAndDefIndex* stack, + uint32_t depth) { + stackDepthAfter = depth; + if (stackDepthAfter) { + offsetStackAfter = alloc.newArray(stackDepthAfter); + if (!offsetStackAfter) { + return false; + } + for (uint32_t n = 0; n < stackDepthAfter; n++) { + offsetStackAfter[n] = stack[n]; + } + } + return true; + } + + bool addJump(uint32_t from, JumpKind kind) { + return jumpOrigins.append(JumpInfo(from, kind)); + } +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + // When control-flow merges, intersect the stacks, marking slots that + // are defined by different offsets and/or defIndices merged. + // This is sufficient for forward control-flow. It doesn't grok loops + // -- for that you would have to iterate to a fixed point -- but there + // shouldn't be operands on the stack at a loop back-edge anyway. + void mergeOffsetStack(const OffsetAndDefIndex* stack, uint32_t depth) { + MOZ_ASSERT(depth == stackDepth); + for (uint32_t n = 0; n < stackDepth; n++) { + if (stack[n].isIgnored()) { + continue; + } + if (offsetStack[n].isIgnored()) { + offsetStack[n] = stack[n]; + } + if (offsetStack[n] != stack[n]) { + offsetStack[n].setMerged(); + } + } + } + }; + + JSContext* cx_; + LifoAlloc& alloc_; + RootedScript script_; + + Bytecode** codeArray_; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // Dedicated mode for stack dump. + // Capture stack after each opcode, and also enable special handling for + // some opcodes to make stack transition clearer. 
+ bool isStackDump; +#endif + + public: + BytecodeParser(JSContext* cx, LifoAlloc& alloc, JSScript* script) + : cx_(cx), + alloc_(alloc), + script_(cx, script), + codeArray_(nullptr) +#ifdef DEBUG + , + isStackDump(false) +#endif + { + } + + bool parse(); + +#if defined(DEBUG) || defined(JS_JITSPEW) + bool isReachable(const jsbytecode* pc) const { return maybeCode(pc); } +#endif + + uint32_t stackDepthAtPC(uint32_t offset) const { + // Sometimes the code generator in debug mode asks about the stack depth + // of unreachable code (bug 932180 comment 22). Assume that unreachable + // code has no operands on the stack. + return getCode(offset).stackDepth; + } + uint32_t stackDepthAtPC(const jsbytecode* pc) const { + return stackDepthAtPC(script_->pcToOffset(pc)); + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + uint32_t stackDepthAfterPC(uint32_t offset) const { + return getCode(offset).stackDepthAfter; + } + uint32_t stackDepthAfterPC(const jsbytecode* pc) const { + return stackDepthAfterPC(script_->pcToOffset(pc)); + } +#endif + + const OffsetAndDefIndex& offsetForStackOperand(uint32_t offset, + int operand) const { + Bytecode& code = getCode(offset); + if (operand < 0) { + operand += code.stackDepth; + MOZ_ASSERT(operand >= 0); + } + MOZ_ASSERT(uint32_t(operand) < code.stackDepth); + return code.offsetStack[operand]; + } + jsbytecode* pcForStackOperand(jsbytecode* pc, int operand, + uint8_t* defIndex) const { + size_t offset = script_->pcToOffset(pc); + const OffsetAndDefIndex& offsetAndDefIndex = + offsetForStackOperand(offset, operand); + if (offsetAndDefIndex.isSpecial()) { + return nullptr; + } + *defIndex = offsetAndDefIndex.defIndex(); + return script_->offsetToPC(offsetAndDefIndex.offset()); + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + const OffsetAndDefIndex& offsetForStackOperandAfterPC(uint32_t offset, + int operand) const { + Bytecode& code = getCode(offset); + if (operand < 0) { + operand += code.stackDepthAfter; + MOZ_ASSERT(operand >= 0); + } 
+ MOZ_ASSERT(uint32_t(operand) < code.stackDepthAfter); + return code.offsetStackAfter[operand]; + } + + template + bool forEachJumpOrigins(jsbytecode* pc, Callback callback) const { + Bytecode& code = getCode(script_->pcToOffset(pc)); + + for (Bytecode::JumpInfo& info : code.jumpOrigins) { + if (!callback(script_->offsetToPC(info.from), info.kind)) { + return false; + } + } + + return true; + } + + void setStackDump() { isStackDump = true; } +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + private: + LifoAlloc& alloc() { return alloc_; } + + void reportOOM() { ReportOutOfMemory(cx_); } + + uint32_t maximumStackDepth() const { + return script_->nslots() - script_->nfixed(); + } + + Bytecode& getCode(uint32_t offset) const { + MOZ_ASSERT(offset < script_->length()); + MOZ_ASSERT(codeArray_[offset]); + return *codeArray_[offset]; + } + + Bytecode* maybeCode(uint32_t offset) const { + MOZ_ASSERT(offset < script_->length()); + return codeArray_[offset]; + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + Bytecode* maybeCode(const jsbytecode* pc) const { + return maybeCode(script_->pcToOffset(pc)); + } +#endif + + uint32_t simulateOp(JSOp op, uint32_t offset, OffsetAndDefIndex* offsetStack, + uint32_t stackDepth); + + inline bool recordBytecode(uint32_t offset, + const OffsetAndDefIndex* offsetStack, + uint32_t stackDepth); + + inline bool addJump(uint32_t offset, uint32_t stackDepth, + const OffsetAndDefIndex* offsetStack, jsbytecode* pc, + JumpKind kind); +}; + +} // anonymous namespace + +uint32_t BytecodeParser::simulateOp(JSOp op, uint32_t offset, + OffsetAndDefIndex* offsetStack, + uint32_t stackDepth) { + jsbytecode* pc = script_->offsetToPC(offset); + uint32_t nuses = GetUseCount(pc); + uint32_t ndefs = GetDefCount(pc); + + MOZ_RELEASE_ASSERT(stackDepth >= nuses); + stackDepth -= nuses; + MOZ_RELEASE_ASSERT(stackDepth + ndefs <= maximumStackDepth()); + +#ifdef DEBUG + if (isStackDump) { + // Opcodes that modifies the object but keeps it on the stack while 
+ // initialization should be listed here instead of switch below. + // For error message, they shouldn't be shown as the original object + // after adding properties. + // For stack dump, keeping the input is better. + switch (op) { + case JSOp::InitHiddenProp: + case JSOp::InitHiddenPropGetter: + case JSOp::InitHiddenPropSetter: + case JSOp::InitLockedProp: + case JSOp::InitProp: + case JSOp::InitPropGetter: + case JSOp::InitPropSetter: + case JSOp::SetFunName: + // Keep the second value. + MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + goto end; + + case JSOp::InitElem: + case JSOp::InitElemGetter: + case JSOp::InitElemSetter: + case JSOp::InitHiddenElem: + case JSOp::InitHiddenElemGetter: + case JSOp::InitHiddenElemSetter: + case JSOp::InitLockedElem: + // Keep the third value. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + goto end; + + default: + break; + } + } +#endif /* DEBUG */ + + // Mark the current offset as defining its values on the offset stack, + // unless it just reshuffles the stack. In that case we want to preserve + // the opcode that generated the original value. + switch (op) { + default: + for (uint32_t n = 0; n != ndefs; ++n) { + offsetStack[stackDepth + n].set(offset, n); + } + break; + + case JSOp::NopDestructuring: + // Poison the last offset to not obfuscate the error message. + offsetStack[stackDepth - 1].setIgnored(); + break; + + case JSOp::Case: + // Keep the switch value. 
+ MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::Dup: + MOZ_ASSERT(ndefs == 2); + offsetStack[stackDepth + 1] = offsetStack[stackDepth]; + break; + + case JSOp::Dup2: + MOZ_ASSERT(ndefs == 4); + offsetStack[stackDepth + 2] = offsetStack[stackDepth]; + offsetStack[stackDepth + 3] = offsetStack[stackDepth + 1]; + break; + + case JSOp::DupAt: { + MOZ_ASSERT(ndefs == 1); + unsigned n = GET_UINT24(pc); + MOZ_ASSERT(n < stackDepth); + offsetStack[stackDepth] = offsetStack[stackDepth - 1 - n]; + break; + } + + case JSOp::Swap: { + MOZ_ASSERT(ndefs == 2); + OffsetAndDefIndex tmp = offsetStack[stackDepth + 1]; + offsetStack[stackDepth + 1] = offsetStack[stackDepth]; + offsetStack[stackDepth] = tmp; + break; + } + + case JSOp::Pick: { + unsigned n = GET_UINT8(pc); + MOZ_ASSERT(ndefs == n + 1); + uint32_t top = stackDepth + n; + OffsetAndDefIndex tmp = offsetStack[stackDepth]; + for (uint32_t i = stackDepth; i < top; i++) { + offsetStack[i] = offsetStack[i + 1]; + } + offsetStack[top] = tmp; + break; + } + + case JSOp::Unpick: { + unsigned n = GET_UINT8(pc); + MOZ_ASSERT(ndefs == n + 1); + uint32_t top = stackDepth + n; + OffsetAndDefIndex tmp = offsetStack[top]; + for (uint32_t i = top; i > stackDepth; i--) { + offsetStack[i] = offsetStack[i - 1]; + } + offsetStack[stackDepth] = tmp; + break; + } + + case JSOp::And: + case JSOp::CheckIsObj: + case JSOp::CheckObjCoercible: + case JSOp::CheckThis: + case JSOp::CheckThisReinit: + case JSOp::CheckClassHeritage: + case JSOp::DebugCheckSelfHosted: + case JSOp::InitGLexical: + case JSOp::InitLexical: + case JSOp::Or: + case JSOp::Coalesce: + case JSOp::SetAliasedVar: + case JSOp::SetArg: + case JSOp::SetIntrinsic: + case JSOp::SetLocal: + case JSOp::InitAliasedLexical: + case JSOp::CheckLexical: + case JSOp::CheckAliasedLexical: + // Keep the top value. + MOZ_ASSERT(nuses == 1); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::InitHomeObject: + // Pop the top value, keep the other value. 
+ MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::CheckResumeKind: + // Pop the top two values, keep the other value. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::SetGName: + case JSOp::SetName: + case JSOp::SetProp: + case JSOp::StrictSetGName: + case JSOp::StrictSetName: + case JSOp::StrictSetProp: + // Keep the top value, removing other 1 value. + MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 1]; + break; + + case JSOp::SetPropSuper: + case JSOp::StrictSetPropSuper: + // Keep the top value, removing other 2 values. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 2]; + break; + + case JSOp::SetElemSuper: + case JSOp::StrictSetElemSuper: + // Keep the top value, removing other 3 values. + MOZ_ASSERT(nuses == 4); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 3]; + break; + + case JSOp::IsGenClosing: + case JSOp::IsNoIter: + case JSOp::IsNullOrUndefined: + case JSOp::MoreIter: + // Keep the top value and push one more value. + MOZ_ASSERT(nuses == 1); + MOZ_ASSERT(ndefs == 2); + offsetStack[stackDepth + 1].set(offset, 1); + break; + + case JSOp::CheckPrivateField: + // Keep the top two values, and push one new value. 
+ MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 3); + offsetStack[stackDepth + 2].set(offset, 2); + break; + } + +#ifdef DEBUG +end: +#endif /* DEBUG */ + + stackDepth += ndefs; + return stackDepth; +} + +bool BytecodeParser::recordBytecode(uint32_t offset, + const OffsetAndDefIndex* offsetStack, + uint32_t stackDepth) { + MOZ_RELEASE_ASSERT(offset < script_->length()); + MOZ_RELEASE_ASSERT(stackDepth <= maximumStackDepth()); + + Bytecode*& code = codeArray_[offset]; + if (!code) { + code = alloc().new_(alloc()); + if (!code || !code->captureOffsetStack(alloc(), offsetStack, stackDepth)) { + reportOOM(); + return false; + } + } else { + code->mergeOffsetStack(offsetStack, stackDepth); + } + + return true; +} + +bool BytecodeParser::addJump(uint32_t offset, uint32_t stackDepth, + const OffsetAndDefIndex* offsetStack, + jsbytecode* pc, JumpKind kind) { + if (!recordBytecode(offset, offsetStack, stackDepth)) { + return false; + } + +#ifdef DEBUG + uint32_t currentOffset = script_->pcToOffset(pc); + if (isStackDump) { + if (!codeArray_[offset]->addJump(currentOffset, kind)) { + reportOOM(); + return false; + } + } + + // If this is a backedge, assert we parsed the target JSOp::LoopHead. + MOZ_ASSERT_IF(offset < currentOffset, codeArray_[offset]->parsed); +#endif /* DEBUG */ + + return true; +} + +bool BytecodeParser::parse() { + MOZ_ASSERT(!codeArray_); + + uint32_t length = script_->length(); + codeArray_ = alloc().newArray(length); + + if (!codeArray_) { + reportOOM(); + return false; + } + + mozilla::PodZero(codeArray_, length); + + // Fill in stack depth and definitions at initial bytecode. + Bytecode* startcode = alloc().new_(alloc()); + if (!startcode) { + reportOOM(); + return false; + } + + // Fill in stack depth and definitions at initial bytecode. 
+ OffsetAndDefIndex* offsetStack = + alloc().newArray(maximumStackDepth()); + if (maximumStackDepth() && !offsetStack) { + reportOOM(); + return false; + } + + startcode->stackDepth = 0; + codeArray_[0] = startcode; + + for (uint32_t offset = 0, nextOffset = 0; offset < length; + offset = nextOffset) { + Bytecode* code = maybeCode(offset); + jsbytecode* pc = script_->offsetToPC(offset); + + // Next bytecode to analyze. + nextOffset = offset + GetBytecodeLength(pc); + + MOZ_RELEASE_ASSERT(*pc < JSOP_LIMIT); + JSOp op = JSOp(*pc); + + if (!code) { + // Haven't found a path by which this bytecode is reachable. + continue; + } + + // On a jump target, we reload the offsetStack saved for the current + // bytecode, as it contains either the original offset stack, or the + // merged offset stack. + if (BytecodeIsJumpTarget(op)) { + for (uint32_t n = 0; n < code->stackDepth; ++n) { + offsetStack[n] = code->offsetStack[n]; + } + } + + if (code->parsed) { + // No need to reparse. + continue; + } + + code->parsed = true; + + uint32_t stackDepth = simulateOp(op, offset, offsetStack, code->stackDepth); + +#if defined(DEBUG) || defined(JS_JITSPEW) + if (isStackDump) { + if (!code->captureOffsetStackAfter(alloc(), offsetStack, stackDepth)) { + reportOOM(); + return false; + } + } +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + switch (op) { + case JSOp::TableSwitch: { + uint32_t defaultOffset = offset + GET_JUMP_OFFSET(pc); + jsbytecode* pc2 = pc + JUMP_OFFSET_LEN; + int32_t low = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + int32_t high = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + + if (!addJump(defaultOffset, stackDepth, offsetStack, pc, + JumpKind::SwitchDefault)) { + return false; + } + + uint32_t ncases = high - low + 1; + + for (uint32_t i = 0; i < ncases; i++) { + uint32_t targetOffset = script_->tableSwitchCaseOffset(pc, i); + if (targetOffset != defaultOffset) { + if (!addJump(targetOffset, stackDepth, offsetStack, pc, + JumpKind::SwitchCase)) { + 
return false; + } + } + } + break; + } + + case JSOp::Try: { + // Everything between a try and corresponding catch or finally is + // conditional. Note that there is no problem with code which is skipped + // by a thrown exception but is not caught by a later handler in the + // same function: no more code will execute, and it does not matter what + // is defined. + for (const TryNote& tn : script_->trynotes()) { + if (tn.start == offset + JSOpLength_Try) { + uint32_t catchOffset = tn.start + tn.length; + if (tn.kind() == TryNoteKind::Catch) { + if (!addJump(catchOffset, stackDepth, offsetStack, pc, + JumpKind::TryCatch)) { + return false; + } + } else if (tn.kind() == TryNoteKind::Finally) { + // Two additional values will be on the stack at the beginning + // of the finally block: the exception/resume index, and the + // |throwing| value. For the benefit of the decompiler, point + // them at this Try. + offsetStack[stackDepth].set(offset, 0); + offsetStack[stackDepth + 1].set(offset, 1); + if (!addJump(catchOffset, stackDepth + 2, offsetStack, pc, + JumpKind::TryFinally)) { + return false; + } + } + } + } + break; + } + + default: + break; + } + + // Check basic jump opcodes, which may or may not have a fallthrough. + if (IsJumpOpcode(op)) { + // Case instructions do not push the lvalue back when branching. + uint32_t newStackDepth = stackDepth; + if (op == JSOp::Case) { + newStackDepth--; + } + + uint32_t targetOffset = offset + GET_JUMP_OFFSET(pc); + if (!addJump(targetOffset, newStackDepth, offsetStack, pc, + JumpKind::Simple)) { + return false; + } + } + + // Handle any fallthrough from this opcode. 
+ if (BytecodeFallsThrough(op)) { + if (!recordBytecode(nextOffset, offsetStack, stackDepth)) { + return false; + } + } + } + + return true; +} + +#if defined(DEBUG) || defined(JS_JITSPEW) + +bool js::ReconstructStackDepth(JSContext* cx, JSScript* script, jsbytecode* pc, + uint32_t* depth, bool* reachablePC) { + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + *reachablePC = parser.isReachable(pc); + + if (*reachablePC) { + *depth = parser.stackDepthAtPC(pc); + } + + return true; +} + +static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc, + unsigned loc, bool lines, + const BytecodeParser* parser, Sprinter* sp); + +/* + * If pc != nullptr, include a prefix indicating whether the PC is at the + * current line. If showAll is true, include the source note type and the + * entry stack depth. + */ +[[nodiscard]] static bool DisassembleAtPC( + JSContext* cx, JSScript* scriptArg, bool lines, const jsbytecode* pc, + bool showAll, Sprinter* sp, + DisassembleSkeptically skeptically = DisassembleSkeptically::No) { + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + RootedScript script(cx, scriptArg); + mozilla::Maybe parser; + + if (skeptically == DisassembleSkeptically::No) { + parser.emplace(cx, allocScope.alloc(), script); + parser->setStackDump(); + if (!parser->parse()) { + return false; + } + } + + if (showAll) { + if (!sp->jsprintf("%s:%u\n", script->filename(), + unsigned(script->lineno()))) { + return false; + } + } + + if (pc != nullptr) { + if (!sp->put(" ")) { + return false; + } + } + if (showAll) { + if (!sp->put("sn stack ")) { + return false; + } + } + if (!sp->put("loc ")) { + return false; + } + if (lines) { + if (!sp->put("line")) { + return false; + } + } + if (!sp->put(" op\n")) { + return false; + } + + if (pc != nullptr) { + if (!sp->put(" ")) { + return false; + } + } + if (showAll) { + if (!sp->put("-- ----- ")) { + 
return false; + } + } + if (!sp->put("----- ")) { + return false; + } + if (lines) { + if (!sp->put("----")) { + return false; + } + } + if (!sp->put(" --\n")) { + return false; + } + + jsbytecode* next = script->code(); + jsbytecode* end = script->codeEnd(); + while (next < end) { + if (next == script->main()) { + if (!sp->put("main:\n")) { + return false; + } + } + if (pc != nullptr) { + if (!sp->put(pc == next ? "--> " : " ")) { + return false; + } + } + if (showAll) { + const SrcNote* sn = GetSrcNote(cx, script, next); + if (sn) { + MOZ_ASSERT(!sn->isTerminator()); + SrcNoteIterator iter(sn); + while (true) { + ++iter; + auto next = *iter; + if (!(!next->isTerminator() && next->delta() == 0)) { + break; + } + if (!sp->jsprintf("%s\n ", sn->name())) { + return false; + } + sn = *iter; + } + if (!sp->jsprintf("%s ", sn->name())) { + return false; + } + } else { + if (!sp->put(" ")) { + return false; + } + } + if (parser && parser->isReachable(next)) { + if (!sp->jsprintf("%05u ", parser->stackDepthAtPC(next))) { + return false; + } + } else { + if (!sp->put(" ")) { + return false; + } + } + } + unsigned len = Disassemble1(cx, script, next, script->pcToOffset(next), + lines, parser.ptrOr(nullptr), sp); + if (!len) { + return false; + } + + next += len; + } + + return true; +} + +bool js::Disassemble(JSContext* cx, HandleScript script, bool lines, + Sprinter* sp, DisassembleSkeptically skeptically) { + return DisassembleAtPC(cx, script, lines, nullptr, false, sp, skeptically); +} + +JS_PUBLIC_API bool js::DumpPC(JSContext* cx, FILE* fp) { + gc::AutoSuppressGC suppressGC(cx); + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + ScriptFrameIter iter(cx); + if (iter.done()) { + fprintf(fp, "Empty stack.\n"); + return true; + } + RootedScript script(cx, iter.script()); + bool ok = DisassembleAtPC(cx, script, true, iter.pc(), false, &sprinter); + fprintf(fp, "%s", sprinter.string()); + return ok; +} + +JS_PUBLIC_API bool js::DumpScript(JSContext* cx, 
JSScript* scriptArg, + FILE* fp) { + gc::AutoSuppressGC suppressGC(cx); + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + RootedScript script(cx, scriptArg); + bool ok = Disassemble(cx, script, true, &sprinter); + fprintf(fp, "%s", sprinter.string()); + return ok; +} + +static UniqueChars ToDisassemblySource(JSContext* cx, HandleValue v) { + if (v.isString()) { + return QuoteString(cx, v.toString(), '"'); + } + + if (JS::RuntimeHeapIsBusy()) { + return DuplicateString(cx, ""); + } + + if (v.isObject()) { + JSObject& obj = v.toObject(); + + if (obj.is()) { + RootedFunction fun(cx, &obj.as()); + JSString* str = JS_DecompileFunction(cx, fun); + if (!str) { + return nullptr; + } + return QuoteString(cx, str); + } + + if (obj.is()) { + Rooted reobj(cx, &obj.as()); + JSString* source = RegExpObject::toString(cx, reobj); + if (!source) { + return nullptr; + } + return QuoteString(cx, source); + } + } + + JSString* str = ValueToSource(cx, v); + if (!str) { + return nullptr; + } + return QuoteString(cx, str); +} + +static bool ToDisassemblySource(JSContext* cx, Handle scope, + UniqueChars* bytes) { + UniqueChars source = JS_smprintf("%s {", ScopeKindString(scope->kind())); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + for (Rooted bi(cx, BindingIter(scope)); bi; bi++) { + UniqueChars nameBytes = AtomToPrintableString(cx, bi.name()); + if (!nameBytes) { + return false; + } + + source = JS_sprintf_append(std::move(source), "%s: ", nameBytes.get()); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + BindingLocation loc = bi.location(); + switch (loc.kind()) { + case BindingLocation::Kind::Global: + source = JS_sprintf_append(std::move(source), "global"); + break; + + case BindingLocation::Kind::Frame: + source = + JS_sprintf_append(std::move(source), "frame slot %u", loc.slot()); + break; + + case BindingLocation::Kind::Environment: + source = + JS_sprintf_append(std::move(source), "env slot %u", loc.slot()); + break; + 
+ case BindingLocation::Kind::Argument: + source = + JS_sprintf_append(std::move(source), "arg slot %u", loc.slot()); + break; + + case BindingLocation::Kind::NamedLambdaCallee: + source = JS_sprintf_append(std::move(source), "named lambda callee"); + break; + + case BindingLocation::Kind::Import: + source = JS_sprintf_append(std::move(source), "import"); + break; + } + + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + if (!bi.isLast()) { + source = JS_sprintf_append(std::move(source), ", "); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + } + } + + source = JS_sprintf_append(std::move(source), "}"); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + *bytes = std::move(source); + return true; +} + +static bool DumpJumpOrigins(HandleScript script, jsbytecode* pc, + const BytecodeParser* parser, Sprinter* sp) { + bool called = false; + auto callback = [&script, &sp, &called](jsbytecode* pc, + BytecodeParser::JumpKind kind) { + if (!called) { + called = true; + if (!sp->put("\n# ")) { + return false; + } + } else { + if (!sp->put(", ")) { + return false; + } + } + + switch (kind) { + case BytecodeParser::JumpKind::Simple: + break; + + case BytecodeParser::JumpKind::SwitchCase: + if (!sp->put("switch-case ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::SwitchDefault: + if (!sp->put("switch-default ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::TryCatch: + if (!sp->put("try-catch ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::TryFinally: + if (!sp->put("try-finally ")) { + return false; + } + break; + } + + if (!sp->jsprintf("from %s @ %05u", CodeName(JSOp(*pc)), + unsigned(script->pcToOffset(pc)))) { + return false; + } + + return true; + }; + if (!parser->forEachJumpOrigins(pc, callback)) { + return false; + } + if (called) { + if (!sp->put("\n")) { + return false; + } + } + + return true; +} + +static bool DecompileAtPCForStackDump( + JSContext* cx, 
HandleScript script, + const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp); + +static bool PrintShapeProperties(JSContext* cx, Sprinter* sp, + SharedShape* shape) { + // Add all property keys to a vector to allow printing them in property + // definition order. + Vector props(cx); + for (SharedShapePropertyIter iter(shape); !iter.done(); iter++) { + if (!props.append(iter->key())) { + return false; + } + } + + if (!sp->put("{")) { + return false; + } + + for (size_t i = props.length(); i > 0; i--) { + PropertyKey key = props[i - 1]; + RootedValue keyv(cx, IdToValue(key)); + JSString* str = ToString(cx, keyv); + if (!str) { + ReportOutOfMemory(cx); + return false; + } + if (!sp->putString(str)) { + return false; + } + if (i > 1) { + if (!sp->put(", ")) { + return false; + } + } + } + + return sp->put("}"); +} + +static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc, + unsigned loc, bool lines, + const BytecodeParser* parser, Sprinter* sp) { + if (parser && parser->isReachable(pc)) { + if (!DumpJumpOrigins(script, pc, parser, sp)) { + return 0; + } + } + + size_t before = sp->stringEnd() - sp->string(); + bool stackDumped = false; + auto dumpStack = [&cx, &script, &pc, &parser, &sp, &before, &stackDumped]() { + if (!parser) { + return true; + } + if (stackDumped) { + return true; + } + stackDumped = true; + + size_t after = sp->stringEnd() - sp->string(); + MOZ_ASSERT(after >= before); + + static const size_t stack_column = 40; + for (size_t i = after - before; i < stack_column - 1; i++) { + if (!sp->put(" ")) { + return false; + } + } + + if (!sp->put(" # ")) { + return false; + } + + if (!parser->isReachable(pc)) { + if (!sp->put("!!! 
UNREACHABLE !!!")) { + return false; + } + } else { + uint32_t depth = parser->stackDepthAfterPC(pc); + + for (uint32_t i = 0; i < depth; i++) { + if (i) { + if (!sp->put(" ")) { + return false; + } + } + + const OffsetAndDefIndex& offsetAndDefIndex = + parser->offsetForStackOperandAfterPC(script->pcToOffset(pc), i); + // This will decompile the stack for the same PC many times. + // We'll avoid optimizing it since this is a testing function + // and it won't be worth managing cached expression here. + if (!DecompileAtPCForStackDump(cx, script, offsetAndDefIndex, sp)) { + return false; + } + } + } + + return true; + }; + + if (*pc >= JSOP_LIMIT) { + char numBuf1[12], numBuf2[12]; + SprintfLiteral(numBuf1, "%d", int(*pc)); + SprintfLiteral(numBuf2, "%d", JSOP_LIMIT); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BYTECODE_TOO_BIG, numBuf1, numBuf2); + return 0; + } + JSOp op = JSOp(*pc); + const JSCodeSpec& cs = CodeSpec(op); + const unsigned len = cs.length; + if (!sp->jsprintf("%05u:", loc)) { + return 0; + } + if (lines) { + if (!sp->jsprintf("%4u", PCToLineNumber(script, pc))) { + return 0; + } + } + if (!sp->jsprintf(" %s", CodeName(op))) { + return 0; + } + + int i; + switch (JOF_TYPE(cs.format)) { + case JOF_BYTE: + break; + + case JOF_JUMP: { + ptrdiff_t off = GET_JUMP_OFFSET(pc); + if (!sp->jsprintf(" %u (%+d)", unsigned(loc + int(off)), int(off))) { + return 0; + } + break; + } + + case JOF_SCOPE: { + Rooted scope(cx, script->getScope(pc)); + UniqueChars bytes; + if (!ToDisassemblySource(cx, scope, &bytes)) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_ENVCOORD: { + RootedValue v(cx, StringValue(EnvironmentCoordinateNameSlow(script, pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + EnvironmentCoordinate ec(pc); + if (!sp->jsprintf(" %s (hops = %u, slot = %u)", bytes.get(), ec.hops(), + ec.slot())) { + return 0; + } + break; + } + case 
JOF_DEBUGCOORD: { + EnvironmentCoordinate ec(pc); + if (!sp->jsprintf("(hops = %u, slot = %u)", ec.hops(), ec.slot())) { + return 0; + } + break; + } + case JOF_ATOM: { + RootedValue v(cx, StringValue(script->getAtom(pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + case JOF_STRING: { + RootedValue v(cx, StringValue(script->getString(pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_DOUBLE: { + double d = GET_INLINE_VALUE(pc).toDouble(); + if (!sp->jsprintf(" %lf", d)) { + return 0; + } + break; + } + + case JOF_BIGINT: { + RootedValue v(cx, BigIntValue(script->getBigInt(pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_OBJECT: { + JSObject* obj = script->getObject(pc); + { + RootedValue v(cx, ObjectValue(*obj)); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + } + break; + } + + case JOF_SHAPE: { + SharedShape* shape = script->getShape(pc); + if (!sp->put(" ")) { + return 0; + } + if (!PrintShapeProperties(cx, sp, shape)) { + return 0; + } + break; + } + + case JOF_REGEXP: { + js::RegExpObject* obj = script->getRegExp(pc); + RootedValue v(cx, ObjectValue(*obj)); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_TABLESWITCH: { + int32_t i, low, high; + + ptrdiff_t off = GET_JUMP_OFFSET(pc); + jsbytecode* pc2 = pc + JUMP_OFFSET_LEN; + low = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + high = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + if (!sp->jsprintf(" defaultOffset %d low %d high %d", 
int(off), low, + high)) { + return 0; + } + + // Display stack dump before diplaying the offsets for each case. + if (!dumpStack()) { + return 0; + } + + for (i = low; i <= high; i++) { + off = + script->tableSwitchCaseOffset(pc, i - low) - script->pcToOffset(pc); + if (!sp->jsprintf("\n\t%d: %d", i, int(off))) { + return 0; + } + } + break; + } + + case JOF_QARG: + if (!sp->jsprintf(" %u", GET_ARGNO(pc))) { + return 0; + } + break; + + case JOF_LOCAL: + if (!sp->jsprintf(" %u", GET_LOCALNO(pc))) { + return 0; + } + break; + + case JOF_GCTHING: + if (!sp->jsprintf(" %u", unsigned(GET_GCTHING_INDEX(pc)))) { + return 0; + } + break; + + case JOF_UINT32: + if (!sp->jsprintf(" %u", GET_UINT32(pc))) { + return 0; + } + break; + + case JOF_ICINDEX: + if (!sp->jsprintf(" (ic: %u)", GET_ICINDEX(pc))) { + return 0; + } + break; + + case JOF_LOOPHEAD: + if (!sp->jsprintf(" (ic: %u, depthHint: %u)", GET_ICINDEX(pc), + LoopHeadDepthHint(pc))) { + return 0; + } + break; + + case JOF_TWO_UINT8: { + int one = (int)GET_UINT8(pc); + int two = (int)GET_UINT8(pc + 1); + + if (!sp->jsprintf(" %d", one)) { + return 0; + } + if (!sp->jsprintf(" %d", two)) { + return 0; + } + break; + } + + case JOF_ARGC: + case JOF_UINT16: + i = (int)GET_UINT16(pc); + goto print_int; + + case JOF_RESUMEINDEX: + case JOF_UINT24: + MOZ_ASSERT(len == 4); + i = (int)GET_UINT24(pc); + goto print_int; + + case JOF_UINT8: + i = GET_UINT8(pc); + goto print_int; + + case JOF_INT8: + i = GET_INT8(pc); + goto print_int; + + case JOF_INT32: + MOZ_ASSERT(op == JSOp::Int32); + i = GET_INT32(pc); + print_int: + if (!sp->jsprintf(" %d", i)) { + return 0; + } + break; + + default: { + char numBuf[12]; + SprintfLiteral(numBuf, "%x", cs.format); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_UNKNOWN_FORMAT, numBuf); + return 0; + } + } + + if (!dumpStack()) { + return 0; + } + + if (!sp->put("\n")) { + return 0; + } + return len; +} + +unsigned js::Disassemble1(JSContext* cx, JS::Handle script, + 
jsbytecode* pc, unsigned loc, bool lines, + Sprinter* sp) { + return Disassemble1(cx, script, pc, loc, lines, nullptr, sp); +} + +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + +namespace { +/* + * The expression decompiler is invoked by error handling code to produce a + * string representation of the erroring expression. As it's only a debugging + * tool, it only supports basic expressions. For anything complicated, it simply + * puts "(intermediate value)" into the error result. + * + * Here's the basic algorithm: + * + * 1. Find the stack location of the value whose expression we wish to + * decompile. The error handler can explicitly pass this as an + * argument. Otherwise, we search backwards down the stack for the offending + * value. + * + * 2. Instantiate and run a BytecodeParser for the current frame. This creates a + * stack of pcs parallel to the interpreter stack; given an interpreter stack + * location, the corresponding pc stack location contains the opcode that pushed + * the value in the interpreter. Now, with the result of step 1, we have the + * opcode responsible for pushing the value we want to decompile. + * + * 3. Pass the opcode to decompilePC. decompilePC is the main decompiler + * routine, responsible for a string representation of the expression that + * generated a certain stack location. decompilePC looks at one opcode and + * returns the JS source equivalent of that opcode. + * + * 4. Expressions can, of course, contain subexpressions. For example, the + * literals "4" and "5" are subexpressions of the addition operator in "4 + + * 5". If we need to decompile a subexpression, we call decompilePC (step 2) + * recursively on the operands' pcs. The result is a depth-first traversal of + * the expression tree. + * + */ +struct ExpressionDecompiler { + JSContext* cx; + RootedScript script; + const BytecodeParser& parser; + Sprinter sprinter; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // Dedicated mode for stack dump. 
+ // Generates an expression for stack dump, including internal state, + // and also disables special handling for self-hosted code. + bool isStackDump; +#endif + + ExpressionDecompiler(JSContext* cx, JSScript* script, + const BytecodeParser& parser) + : cx(cx), + script(cx, script), + parser(parser), + sprinter(cx) +#if defined(DEBUG) || defined(JS_JITSPEW) + , + isStackDump(false) +#endif + { + } + bool init(); + bool decompilePCForStackOperand(jsbytecode* pc, int i); + bool decompilePC(jsbytecode* pc, uint8_t defIndex); + bool decompilePC(const OffsetAndDefIndex& offsetAndDefIndex); + JSAtom* getArg(unsigned slot); + JSAtom* loadAtom(jsbytecode* pc); + JSString* loadString(jsbytecode* pc); + bool quote(JSString* s, char quote); + bool write(const char* s); + bool write(JSString* str); + UniqueChars getOutput(); +#if defined(DEBUG) || defined(JS_JITSPEW) + void setStackDump() { isStackDump = true; } +#endif +}; + +bool ExpressionDecompiler::decompilePCForStackOperand(jsbytecode* pc, int i) { + return decompilePC(parser.offsetForStackOperand(script->pcToOffset(pc), i)); +} + +bool ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex) { + MOZ_ASSERT(script->containsPC(pc)); + + JSOp op = (JSOp)*pc; + + if (const char* token = CodeToken[uint8_t(op)]) { + MOZ_ASSERT(defIndex == 0); + MOZ_ASSERT(CodeSpec(op).ndefs == 1); + + // Handle simple cases of binary and unary operators. + switch (CodeSpec(op).nuses) { + case 2: { + const SrcNote* sn = GetSrcNote(cx, script, pc); + const char* extra = + sn && sn->type() == SrcNoteType::AssignOp ? 
"=" : ""; + return write("(") && decompilePCForStackOperand(pc, -2) && write(" ") && + write(token) && write(extra) && write(" ") && + decompilePCForStackOperand(pc, -1) && write(")"); + break; + } + case 1: + return write("(") && write(token) && + decompilePCForStackOperand(pc, -1) && write(")"); + default: + break; + } + } + + switch (op) { + case JSOp::DelName: + return write("(delete ") && write(loadAtom(pc)) && write(")"); + + case JSOp::GetGName: + case JSOp::GetName: + case JSOp::GetIntrinsic: + return write(loadAtom(pc)); + case JSOp::GetArg: { + unsigned slot = GET_ARGNO(pc); + + // For self-hosted scripts that are called from non-self-hosted code, + // decompiling the parameter name in the self-hosted script is + // unhelpful. Decompile the argument name instead. + if (script->selfHosted() +#ifdef DEBUG + // For stack dump, argument name is not necessary. + && !isStackDump +#endif /* DEBUG */ + ) { + UniqueChars result; + if (!DecompileArgumentFromStack(cx, slot, &result)) { + return false; + } + + // Note that decompiling the argument in the parent frame might + // not succeed. + if (result) { + return write(result.get()); + } + + // If it fails, do not return parameter name and let the caller + // fallback. + return write("(intermediate value)"); + } + + JSAtom* atom = getArg(slot); + if (!atom) { + return false; + } + return write(atom); + } + case JSOp::GetLocal: { + JSAtom* atom = FrameSlotName(script, pc); + MOZ_ASSERT(atom); + return write(atom); + } + case JSOp::GetAliasedVar: { + JSAtom* atom = EnvironmentCoordinateNameSlow(script, pc); + MOZ_ASSERT(atom); + return write(atom); + } + + case JSOp::DelProp: + case JSOp::StrictDelProp: + case JSOp::GetProp: + case JSOp::GetBoundName: { + bool hasDelete = op == JSOp::DelProp || op == JSOp::StrictDelProp; + Rooted prop(cx, loadAtom(pc)); + MOZ_ASSERT(prop); + return (hasDelete ? write("(delete ") : true) && + decompilePCForStackOperand(pc, -1) && + (IsIdentifier(prop) + ? 
write(".") && quote(prop, '\0') + : write("[") && quote(prop, '\'') && write("]")) && + (hasDelete ? write(")") : true); + } + case JSOp::GetPropSuper: { + Rooted prop(cx, loadAtom(pc)); + return write("super.") && quote(prop, '\0'); + } + case JSOp::SetElem: + case JSOp::StrictSetElem: + // NOTE: We don't show the right hand side of the operation because + // it's used in error messages like: "a[0] is not readable". + // + // We could though. + return decompilePCForStackOperand(pc, -3) && write("[") && + decompilePCForStackOperand(pc, -2) && write("]"); + + case JSOp::DelElem: + case JSOp::StrictDelElem: + case JSOp::GetElem: { + bool hasDelete = (op == JSOp::DelElem || op == JSOp::StrictDelElem); + return (hasDelete ? write("(delete ") : true) && + decompilePCForStackOperand(pc, -2) && write("[") && + decompilePCForStackOperand(pc, -1) && write("]") && + (hasDelete ? write(")") : true); + } + + case JSOp::GetElemSuper: + return write("super[") && decompilePCForStackOperand(pc, -2) && + write("]"); + case JSOp::Null: + return write(js_null_str); + case JSOp::True: + return write(js_true_str); + case JSOp::False: + return write(js_false_str); + case JSOp::Zero: + case JSOp::One: + case JSOp::Int8: + case JSOp::Uint16: + case JSOp::Uint24: + case JSOp::Int32: + return sprinter.printf("%d", GetBytecodeInteger(pc)); + case JSOp::String: + return quote(loadString(pc), '"'); + case JSOp::Symbol: { + unsigned i = uint8_t(pc[1]); + MOZ_ASSERT(i < JS::WellKnownSymbolLimit); + if (i < JS::WellKnownSymbolLimit) { + return write(cx->names().wellKnownSymbolDescriptions()[i]); + } + break; + } + case JSOp::Undefined: + return write(js_undefined_str); + case JSOp::GlobalThis: + case JSOp::NonSyntacticGlobalThis: + // |this| could convert to a very long object initialiser, so cite it by + // its keyword name. 
+ return write(js_this_str); + case JSOp::NewTarget: + return write("new.target"); + case JSOp::Call: + case JSOp::CallContent: + case JSOp::CallIgnoresRv: + case JSOp::CallIter: + case JSOp::CallContentIter: { + uint16_t argc = GET_ARGC(pc); + return decompilePCForStackOperand(pc, -int32_t(argc + 2)) && + write(argc ? "(...)" : "()"); + } + case JSOp::SpreadCall: + return decompilePCForStackOperand(pc, -3) && write("(...)"); + case JSOp::NewArray: + return write("[]"); + case JSOp::RegExp: { + Rooted obj(cx, &script->getObject(pc)->as()); + JSString* str = RegExpObject::toString(cx, obj); + if (!str) { + return false; + } + return write(str); + } + case JSOp::Object: { + JSObject* obj = script->getObject(pc); + RootedValue objv(cx, ObjectValue(*obj)); + JSString* str = ValueToSource(cx, objv); + if (!str) { + return false; + } + return write(str); + } + case JSOp::Void: + return write("(void ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::SuperCall: + if (GET_ARGC(pc) == 0) { + return write("super()"); + } + [[fallthrough]]; + case JSOp::SpreadSuperCall: + return write("super(...)"); + case JSOp::SuperFun: + return write("super"); + + case JSOp::Eval: + case JSOp::SpreadEval: + case JSOp::StrictEval: + case JSOp::StrictSpreadEval: + return write("eval(...)"); + + case JSOp::New: + case JSOp::NewContent: { + uint16_t argc = GET_ARGC(pc); + return write("(new ") && + decompilePCForStackOperand(pc, -int32_t(argc + 3)) && + write(argc ? "(...))" : "())"); + } + + case JSOp::SpreadNew: + return write("(new ") && decompilePCForStackOperand(pc, -4) && + write("(...))"); + + case JSOp::Typeof: + case JSOp::TypeofExpr: + return write("(typeof ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::InitElemArray: + return write("[...]"); + + case JSOp::InitElemInc: + if (defIndex == 0) { + return write("[...]"); + } + MOZ_ASSERT(defIndex == 1); +#ifdef DEBUG + // INDEX won't be be exposed to error message. 
+ if (isStackDump) { + return write("INDEX"); + } +#endif + break; + + case JSOp::ToNumeric: + return write("(tonumeric ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::Inc: + return write("(inc ") && decompilePCForStackOperand(pc, -1) && write(")"); + + case JSOp::Dec: + return write("(dec ") && decompilePCForStackOperand(pc, -1) && write(")"); + + case JSOp::BigInt: +#if defined(DEBUG) || defined(JS_JITSPEW) + // BigInt::dump() only available in this configuration. + script->getBigInt(pc)->dump(sprinter); + return !sprinter.hadOutOfMemory(); +#else + return write("[bigint]"); +#endif + + case JSOp::BuiltinObject: { + auto kind = BuiltinObjectKind(GET_UINT8(pc)); + return write(BuiltinObjectName(kind)); + } + +#ifdef ENABLE_RECORD_TUPLE + case JSOp::InitTuple: + return write("#[]"); + + case JSOp::AddTupleElement: + case JSOp::FinishTuple: + return write("#[...]"); +#endif + + default: + break; + } + +#ifdef DEBUG + if (isStackDump) { + // Special decompilation for stack dump. + switch (op) { + case JSOp::Arguments: + return write("arguments"); + + case JSOp::BindGName: + return write("GLOBAL"); + + case JSOp::BindName: + case JSOp::BindVar: + return write("ENV"); + + case JSOp::Callee: + return write("CALLEE"); + + case JSOp::EnvCallee: + return write("ENVCALLEE"); + + case JSOp::CallSiteObj: + return write("OBJ"); + + case JSOp::Double: + return sprinter.printf("%lf", GET_INLINE_VALUE(pc).toDouble()); + + case JSOp::Exception: + return write("EXCEPTION"); + + case JSOp::Try: + // Used for the values live on entry to the finally block. + // See TryNoteKind::Finally above. 
+ if (defIndex == 0) { + return write("PC"); + } + MOZ_ASSERT(defIndex == 1); + return write("THROWING"); + + case JSOp::FunctionThis: + case JSOp::ImplicitThis: + return write("THIS"); + + case JSOp::FunWithProto: + return write("FUN"); + + case JSOp::Generator: + return write("GENERATOR"); + + case JSOp::GetImport: + return write("VAL"); + + case JSOp::GetRval: + return write("RVAL"); + + case JSOp::Hole: + return write("HOLE"); + + case JSOp::IsGenClosing: + // For stack dump, defIndex == 0 is not used. + MOZ_ASSERT(defIndex == 1); + return write("ISGENCLOSING"); + + case JSOp::IsNoIter: + // For stack dump, defIndex == 0 is not used. + MOZ_ASSERT(defIndex == 1); + return write("ISNOITER"); + + case JSOp::IsConstructing: + return write("JS_IS_CONSTRUCTING"); + + case JSOp::IsNullOrUndefined: + return write("IS_NULL_OR_UNDEF"); + + case JSOp::Iter: + return write("ITER"); + + case JSOp::Lambda: + return write("FUN"); + + case JSOp::ToAsyncIter: + return write("ASYNCITER"); + + case JSOp::MoreIter: + // For stack dump, defIndex == 0 is not used. 
+ MOZ_ASSERT(defIndex == 1); + return write("MOREITER"); + + case JSOp::MutateProto: + return write("SUCCEEDED"); + + case JSOp::NewInit: + case JSOp::NewObject: + case JSOp::ObjWithProto: + return write("OBJ"); + + case JSOp::OptimizeSpreadCall: + return write("OPTIMIZED"); + + case JSOp::Rest: + return write("REST"); + + case JSOp::Resume: + return write("RVAL"); + + case JSOp::SuperBase: + return write("HOMEOBJECTPROTO"); + + case JSOp::ToPropertyKey: + return write("TOPROPERTYKEY(") && decompilePCForStackOperand(pc, -1) && + write(")"); + case JSOp::ToString: + return write("TOSTRING(") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::Uninitialized: + return write("UNINITIALIZED"); + + case JSOp::InitialYield: + case JSOp::Await: + case JSOp::Yield: + // Printing "yield SOMETHING" is confusing since the operand doesn't + // match to the syntax, since the stack operand for "yield 10" is + // the result object, not 10. + if (defIndex == 0) { + return write("RVAL"); + } + if (defIndex == 1) { + return write("GENERATOR"); + } + MOZ_ASSERT(defIndex == 2); + return write("RESUMEKIND"); + + case JSOp::ResumeKind: + return write("RESUMEKIND"); + + case JSOp::AsyncAwait: + case JSOp::AsyncResolve: + return write("PROMISE"); + + case JSOp::CheckPrivateField: + return write("HasPrivateField"); + + case JSOp::NewPrivateName: + return write("PRIVATENAME"); + + case JSOp::CheckReturn: + return write("RVAL"); + + default: + break; + } + return write(""); + } +#endif /* DEBUG */ + + return write("(intermediate value)"); +} + +bool ExpressionDecompiler::decompilePC( + const OffsetAndDefIndex& offsetAndDefIndex) { + if (offsetAndDefIndex.isSpecial()) { +#ifdef DEBUG + if (isStackDump) { + if (offsetAndDefIndex.isMerged()) { + if (!write("merged<")) { + return false; + } + } else if (offsetAndDefIndex.isIgnored()) { + if (!write("ignored<")) { + return false; + } + } + + if (!decompilePC(script->offsetToPC(offsetAndDefIndex.specialOffset()), + 
offsetAndDefIndex.specialDefIndex())) { + return false; + } + + if (!write(">")) { + return false; + } + + return true; + } +#endif /* DEBUG */ + return write("(intermediate value)"); + } + + return decompilePC(script->offsetToPC(offsetAndDefIndex.offset()), + offsetAndDefIndex.defIndex()); +} + +bool ExpressionDecompiler::init() { + cx->check(script); + return sprinter.init(); +} + +bool ExpressionDecompiler::write(const char* s) { return sprinter.put(s); } + +bool ExpressionDecompiler::write(JSString* str) { + if (str == cx->names().dotThis) { + return write("this"); + } + if (str == cx->names().dotNewTarget) { + return write("new.target"); + } + return sprinter.putString(str); +} + +bool ExpressionDecompiler::quote(JSString* s, char quote) { + return QuoteString(&sprinter, s, quote); +} + +JSAtom* ExpressionDecompiler::loadAtom(jsbytecode* pc) { + return script->getAtom(pc); +} + +JSString* ExpressionDecompiler::loadString(jsbytecode* pc) { + return script->getString(pc); +} + +JSAtom* ExpressionDecompiler::getArg(unsigned slot) { + MOZ_ASSERT(script->isFunction()); + MOZ_ASSERT(slot < script->numArgs()); + + for (PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.argumentSlot() == slot) { + if (!fi.isDestructured()) { + return fi.name(); + } + + // Destructured arguments have no single binding name. 
+ static const char destructuredParam[] = "(destructured parameter)"; + return Atomize(cx, destructuredParam, strlen(destructuredParam)); + } + } + + MOZ_CRASH("No binding"); +} + +UniqueChars ExpressionDecompiler::getOutput() { + ptrdiff_t len = sprinter.stringEnd() - sprinter.stringAt(0); + auto res = cx->make_pod_array(len + 1); + if (!res) { + return nullptr; + } + js_memcpy(res.get(), sprinter.stringAt(0), len); + res[len] = 0; + return res; +} + +} // anonymous namespace + +#if defined(DEBUG) || defined(JS_JITSPEW) +static bool DecompileAtPCForStackDump( + JSContext* cx, HandleScript script, + const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp) { + // The expression decompiler asserts the script is in the current realm. + AutoRealm ar(cx, script); + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + parser.setStackDump(); + if (!parser.parse()) { + return false; + } + + ExpressionDecompiler ed(cx, script, parser); + ed.setStackDump(); + if (!ed.init()) { + return false; + } + + if (!ed.decompilePC(offsetAndDefIndex)) { + return false; + } + + UniqueChars result = ed.getOutput(); + if (!result) { + return false; + } + + return sp->put(result.get()); +} +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + +static bool FindStartPC(JSContext* cx, const FrameIter& iter, + const BytecodeParser& parser, int spindex, + int skipStackHits, const Value& v, jsbytecode** valuepc, + uint8_t* defIndex) { + jsbytecode* current = *valuepc; + *valuepc = nullptr; + *defIndex = 0; + + if (spindex < 0 && spindex + int(parser.stackDepthAtPC(current)) < 0) { + spindex = JSDVG_SEARCH_STACK; + } + + if (spindex == JSDVG_SEARCH_STACK) { + size_t index = iter.numFrameSlots(); + + // The decompiler may be called from inside functions that are not + // called from script, but via the C++ API directly, such as + // Invoke. 
In that case, the youngest script frame may have a + // completely unrelated pc and stack depth, so we give up. + if (index < size_t(parser.stackDepthAtPC(current))) { + return true; + } + + // We search from fp->sp to base to find the most recently calculated + // value matching v under assumption that it is the value that caused + // the exception. + int stackHits = 0; + Value s; + do { + if (!index) { + return true; + } + s = iter.frameSlotValue(--index); + } while (s != v || stackHits++ != skipStackHits); + + // If the current PC has fewer values on the stack than the index we are + // looking for, the blamed value must be one pushed by the current + // bytecode (e.g. JSOp::MoreIter), so restore *valuepc. + if (index < size_t(parser.stackDepthAtPC(current))) { + *valuepc = parser.pcForStackOperand(current, index, defIndex); + } else { + *valuepc = current; + *defIndex = index - size_t(parser.stackDepthAtPC(current)); + } + } else { + *valuepc = parser.pcForStackOperand(current, spindex, defIndex); + } + return true; +} + +static bool DecompileExpressionFromStack(JSContext* cx, int spindex, + int skipStackHits, HandleValue v, + UniqueChars* res) { + MOZ_ASSERT(spindex < 0 || spindex == JSDVG_IGNORE_STACK || + spindex == JSDVG_SEARCH_STACK); + + *res = nullptr; + + /* + * Give up if we need deterministic behavior for differential testing. + * IonMonkey doesn't use InterpreterFrames and this ensures we get the same + * error messages. + */ + if (js::SupportDifferentialTesting()) { + return true; + } + + if (spindex == JSDVG_IGNORE_STACK) { + return true; + } + + FrameIter frameIter(cx); + + if (frameIter.done() || !frameIter.hasScript() || + frameIter.realm() != cx->realm() || frameIter.inPrologue()) { + return true; + } + + /* + * FIXME: Fall back if iter.isIon(), since the stack snapshot may be for the + * previous pc (see bug 831120). 
+ */ + if (frameIter.isIon()) { + return true; + } + + RootedScript script(cx, frameIter.script()); + jsbytecode* valuepc = frameIter.pc(); + + MOZ_ASSERT(script->containsPC(valuepc)); + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), frameIter.script()); + if (!parser.parse()) { + return false; + } + + uint8_t defIndex; + if (!FindStartPC(cx, frameIter, parser, spindex, skipStackHits, v, &valuepc, + &defIndex)) { + return false; + } + if (!valuepc) { + return true; + } + + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + if (!ed.decompilePC(valuepc, defIndex)) { + return false; + } + + *res = ed.getOutput(); + return *res != nullptr; +} + +UniqueChars js::DecompileValueGenerator(JSContext* cx, int spindex, + HandleValue v, HandleString fallbackArg, + int skipStackHits) { + RootedString fallback(cx, fallbackArg); + { + UniqueChars result; + if (!DecompileExpressionFromStack(cx, spindex, skipStackHits, v, &result)) { + return nullptr; + } + if (result && strcmp(result.get(), "(intermediate value)")) { + return result; + } + } + if (!fallback) { + if (v.isUndefined()) { + return DuplicateString( + cx, js_undefined_str); // Prevent users from seeing "(void 0)" + } + fallback = ValueToSource(cx, v); + if (!fallback) { + return nullptr; + } + } + + return StringToNewUTF8CharsZ(cx, *fallback); +} + +static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex, + UniqueChars* res) { + MOZ_ASSERT(formalIndex >= 0); + + *res = nullptr; + + /* See note in DecompileExpressionFromStack. */ + if (js::SupportDifferentialTesting()) { + return true; + } + + /* + * Settle on the nearest script frame, which should be the builtin that + * called the intrinsic. + */ + FrameIter frameIter(cx); + MOZ_ASSERT(!frameIter.done()); + MOZ_ASSERT(frameIter.script()->selfHosted()); + + /* + * Get the second-to-top frame, the non-self-hosted caller of the builtin + * that called the intrinsic. 
+ */ + ++frameIter; + if (frameIter.done() || !frameIter.hasScript() || + frameIter.script()->selfHosted() || frameIter.realm() != cx->realm()) { + return true; + } + + RootedScript script(cx, frameIter.script()); + jsbytecode* current = frameIter.pc(); + + MOZ_ASSERT(script->containsPC(current)); + + if (current < script->main()) { + return true; + } + + /* Don't handle getters, setters or calls from fun.call/fun.apply. */ + JSOp op = JSOp(*current); + if (op != JSOp::Call && op != JSOp::CallContent && + op != JSOp::CallIgnoresRv && op != JSOp::New && op != JSOp::NewContent) { + return true; + } + + if (static_cast(formalIndex) >= GET_ARGC(current)) { + return true; + } + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + bool pushedNewTarget = op == JSOp::New || op == JSOp::NewContent; + int formalStackIndex = parser.stackDepthAtPC(current) - GET_ARGC(current) - + pushedNewTarget + formalIndex; + MOZ_ASSERT(formalStackIndex >= 0); + if (uint32_t(formalStackIndex) >= parser.stackDepthAtPC(current)) { + return true; + } + + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + if (!ed.decompilePCForStackOperand(current, formalStackIndex)) { + return false; + } + + *res = ed.getOutput(); + return *res != nullptr; +} + +JSString* js::DecompileArgument(JSContext* cx, int formalIndex, HandleValue v) { + { + UniqueChars result; + if (!DecompileArgumentFromStack(cx, formalIndex, &result)) { + return nullptr; + } + if (result && strcmp(result.get(), "(intermediate value)")) { + JS::ConstUTF8CharsZ utf8chars(result.get(), strlen(result.get())); + return NewStringCopyUTF8Z(cx, utf8chars); + } + } + if (v.isUndefined()) { + return cx->names().undefined; // Prevent users from seeing "(void 0)" + } + + return ValueToSource(cx, v); +} + +extern bool js::IsValidBytecodeOffset(JSContext* cx, JSScript* script, + size_t offset) { + // This could 
be faster (by following jump instructions if the target + // is <= offset). + for (BytecodeRange r(cx, script); !r.empty(); r.popFront()) { + size_t here = r.frontOffset(); + if (here >= offset) { + return here == offset; + } + } + return false; +} + +/* + * There are three possible PCCount profiling states: + * + * 1. None: Neither scripts nor the runtime have count information. + * 2. Profile: Active scripts have count information, the runtime does not. + * 3. Query: Scripts do not have count information, the runtime does. + * + * When starting to profile scripts, counting begins immediately, with all JIT + * code discarded and recompiled with counts as necessary. Active interpreter + * frames will not begin profiling until they begin executing another script + * (via a call or return). + * + * The below API functions manage transitions to new states, according + * to the table below. + * + * Old State + * ------------------------- + * Function None Profile Query + * -------- + * StartPCCountProfiling Profile Profile Profile + * StopPCCountProfiling None Query Query + * PurgePCCounts None None None + */ + +static void ReleaseScriptCounts(JSRuntime* rt) { + MOZ_ASSERT(rt->scriptAndCountsVector); + + js_delete(rt->scriptAndCountsVector.ref()); + rt->scriptAndCountsVector = nullptr; +} + +void JS::StartPCCountProfiling(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (rt->profilingScripts) { + return; + } + + if (rt->scriptAndCountsVector) { + ReleaseScriptCounts(rt); + } + + ReleaseAllJITCode(rt->gcContext()); + + rt->profilingScripts = true; +} + +void JS::StopPCCountProfiling(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->profilingScripts) { + return; + } + MOZ_ASSERT(!rt->scriptAndCountsVector); + + ReleaseAllJITCode(rt->gcContext()); + + auto* vec = cx->new_>( + cx, ScriptAndCountsVector()); + if (!vec) { + return; + } + + for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) { + for (auto base = zone->cellIter(); !base.done(); 
base.next()) { + if (base->hasScriptCounts() && base->hasJitScript()) { + if (!vec->append(base->asJSScript())) { + return; + } + } + } + } + + rt->profilingScripts = false; + rt->scriptAndCountsVector = vec; +} + +void JS::PurgePCCounts(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector) { + return; + } + MOZ_ASSERT(!rt->profilingScripts); + + ReleaseScriptCounts(rt); +} + +size_t JS::GetPCCountScriptCount(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector) { + return 0; + } + + return rt->scriptAndCountsVector->length(); +} + +[[nodiscard]] static bool JSONStringProperty(Sprinter& sp, JSONPrinter& json, + const char* name, JSString* str) { + json.beginStringProperty(name); + if (!JSONQuoteString(&sp, str)) { + return false; + } + json.endStringProperty(); + return true; +} + +JSString* JS::GetPCCountScriptSummary(JSContext* cx, size_t index) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector || + index >= rt->scriptAndCountsVector->length()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); + return nullptr; + } + + const ScriptAndCounts& sac = (*rt->scriptAndCountsVector)[index]; + RootedScript script(cx, sac.script); + + Sprinter sp(cx); + if (!sp.init()) { + return nullptr; + } + + JSONPrinter json(sp, false); + + json.beginObject(); + + Rooted filenameStr(cx); + if (const char* filename = script->filename()) { + filenameStr = + JS_NewStringCopyUTF8N(cx, JS::UTF8Chars(filename, strlen(filename))); + } else { + filenameStr = JS_GetEmptyString(cx); + } + if (!filenameStr) { + return nullptr; + } + if (!JSONStringProperty(sp, json, "file", filenameStr)) { + return nullptr; + } + json.property("line", script->lineno()); + + if (JSFunction* fun = script->function()) { + if (JSAtom* atom = fun->displayAtom()) { + if (!JSONStringProperty(sp, json, "name", atom)) { + return nullptr; + } + } + } + + uint64_t total = 0; + + AllBytecodesIterable 
iter(script); + for (BytecodeLocation loc : iter) { + if (const PCCounts* counts = sac.maybeGetPCCounts(loc.toRawBytecode())) { + total += counts->numExec(); + } + } + + json.beginObjectProperty("totals"); + + json.property(PCCounts::numExecName, total); + + uint64_t ionActivity = 0; + jit::IonScriptCounts* ionCounts = sac.getIonCounts(); + while (ionCounts) { + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + ionActivity += ionCounts->block(i).hitCount(); + } + ionCounts = ionCounts->previous(); + } + if (ionActivity) { + json.property("ion", ionActivity); + } + + json.endObject(); + + json.endObject(); + + if (sp.hadOutOfMemory()) { + return nullptr; + } + + return NewStringCopyZ(cx, sp.string()); +} + +static bool GetPCCountJSON(JSContext* cx, const ScriptAndCounts& sac, + Sprinter& sp) { + JSONPrinter json(sp, false); + + RootedScript script(cx, sac.script); + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + json.beginObject(); + + JSString* str = JS_DecompileScript(cx, script); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "text", str)) { + return false; + } + + json.property("line", script->lineno()); + + json.beginListProperty("opcodes"); + + uint64_t hits = 0; + for (BytecodeRangeWithPosition range(cx, script); !range.empty(); + range.popFront()) { + jsbytecode* pc = range.frontPC(); + size_t offset = script->pcToOffset(pc); + JSOp op = JSOp(*pc); + + // If the current instruction is a jump target, + // then update the number of hits. + if (const PCCounts* counts = sac.maybeGetPCCounts(pc)) { + hits = counts->numExec(); + } + + json.beginObject(); + + json.property("id", offset); + json.property("line", range.frontLineNumber()); + json.property("name", CodeName(op)); + + { + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + // defIndex passed here is not used. 
+ if (!ed.decompilePC(pc, /* defIndex = */ 0)) { + return false; + } + UniqueChars text = ed.getOutput(); + if (!text) { + return false; + } + + JS::ConstUTF8CharsZ utf8chars(text.get(), strlen(text.get())); + JSString* str = NewStringCopyUTF8Z(cx, utf8chars); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "text", str)) { + return false; + } + } + + json.beginObjectProperty("counts"); + if (hits > 0) { + json.property(PCCounts::numExecName, hits); + } + json.endObject(); + + json.endObject(); + + // If the current instruction has thrown, + // then decrement the hit counts with the number of throws. + if (const PCCounts* counts = sac.maybeGetThrowCounts(pc)) { + hits -= counts->numExec(); + } + } + + json.endList(); + + if (jit::IonScriptCounts* ionCounts = sac.getIonCounts()) { + json.beginListProperty("ion"); + + while (ionCounts) { + json.beginList(); + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + const jit::IonBlockCounts& block = ionCounts->block(i); + + json.beginObject(); + json.property("id", block.id()); + json.property("offset", block.offset()); + + json.beginListProperty("successors"); + for (size_t j = 0; j < block.numSuccessors(); j++) { + json.value(block.successor(j)); + } + json.endList(); + + json.property("hits", block.hitCount()); + + JSString* str = NewStringCopyZ(cx, block.code()); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "code", str)) { + return false; + } + + json.endObject(); + } + json.endList(); + + ionCounts = ionCounts->previous(); + } + + json.endList(); + } + + json.endObject(); + + return !sp.hadOutOfMemory(); +} + +JSString* JS::GetPCCountScriptContents(JSContext* cx, size_t index) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector || + index >= rt->scriptAndCountsVector->length()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); + return nullptr; + } + + const ScriptAndCounts& sac = 
(*rt->scriptAndCountsVector)[index]; + JSScript* script = sac.script; + + Sprinter sp(cx); + if (!sp.init()) { + return nullptr; + } + + { + AutoRealm ar(cx, &script->global()); + if (!GetPCCountJSON(cx, sac, sp)) { + return nullptr; + } + } + + if (sp.hadOutOfMemory()) { + return nullptr; + } + + return NewStringCopyZ(cx, sp.string()); +} + +struct CollectedScripts { + MutableHandle scripts; + bool ok = true; + + explicit CollectedScripts(MutableHandle scripts) + : scripts(scripts) {} + + static void consider(JSRuntime* rt, void* data, BaseScript* script, + const JS::AutoRequireNoGC& nogc) { + auto self = static_cast(data); + if (!script->filename()) { + return; + } + if (!self->scripts.append(script->asJSScript())) { + self->ok = false; + } + } +}; + +static bool GenerateLcovInfo(JSContext* cx, JS::Realm* realm, + GenericPrinter& out) { + AutoRealmUnchecked ar(cx, realm); + + // Collect the list of scripts which are part of the current realm. + + MOZ_RELEASE_ASSERT( + coverage::IsLCovEnabled(), + "Coverage must be enabled for process before generating LCov info"); + + // Hold the scripts that we have already flushed, to avoid flushing them + // twice. + using JSScriptSet = GCHashSet; + Rooted scriptsDone(cx, JSScriptSet(cx)); + + Rooted queue(cx, ScriptVector(cx)); + + { + CollectedScripts result(&queue); + IterateScripts(cx, realm, &result, &CollectedScripts::consider); + if (!result.ok) { + ReportOutOfMemory(cx); + return false; + } + } + + if (queue.length() == 0) { + return true; + } + + // Ensure the LCovRealm exists to collect info into. + coverage::LCovRealm* lcovRealm = realm->lcovRealm(); + if (!lcovRealm) { + return false; + } + + // Collect code coverage info for one realm. 
+ do { + RootedScript script(cx, queue.popCopy()); + RootedFunction fun(cx); + + JSScriptSet::AddPtr entry = scriptsDone.lookupForAdd(script); + if (entry) { + continue; + } + + if (!coverage::CollectScriptCoverage(script, false)) { + ReportOutOfMemory(cx); + return false; + } + + script->resetScriptCounts(); + + if (!scriptsDone.add(entry, script)) { + return false; + } + + if (!script->isTopLevel()) { + continue; + } + + // Iterate from the last to the first object in order to have + // the functions them visited in the opposite order when popping + // elements from the stack of remaining scripts, such that the + // functions are more-less listed with increasing line numbers. + auto gcthings = script->gcthings(); + for (JS::GCCellPtr gcThing : mozilla::Reversed(gcthings)) { + if (!gcThing.is()) { + continue; + } + JSObject* obj = &gcThing.as(); + + if (!obj->is()) { + continue; + } + fun = &obj->as(); + + // Ignore asm.js functions + if (!fun->isInterpreted()) { + continue; + } + + // Queue the script in the list of script associated to the + // current source. 
+ JSScript* childScript = JSFunction::getOrCreateScript(cx, fun); + if (!childScript || !queue.append(childScript)) { + return false; + } + } + } while (!queue.empty()); + + bool isEmpty = true; + lcovRealm->exportInto(out, &isEmpty); + if (out.hadOutOfMemory()) { + return false; + } + + return true; +} + +JS_PUBLIC_API UniqueChars js::GetCodeCoverageSummaryAll(JSContext* cx, + size_t* length) { + Sprinter out(cx); + if (!out.init()) { + return nullptr; + } + + for (RealmsIter realm(cx->runtime()); !realm.done(); realm.next()) { + if (!GenerateLcovInfo(cx, realm, out)) { + return nullptr; + } + } + + *length = out.getOffset(); + return js::DuplicateString(cx, out.string(), *length); +} + +JS_PUBLIC_API UniqueChars js::GetCodeCoverageSummary(JSContext* cx, + size_t* length) { + Sprinter out(cx); + if (!out.init()) { + return nullptr; + } + + if (!GenerateLcovInfo(cx, cx->realm(), out)) { + return nullptr; + } + + *length = out.getOffset(); + return js::DuplicateString(cx, out.string(), *length); +} diff --git a/js/src/vm/BytecodeUtil.h b/js/src/vm/BytecodeUtil.h new file mode 100644 index 0000000000..ba3280ed90 --- /dev/null +++ b/js/src/vm/BytecodeUtil.h @@ -0,0 +1,665 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeUtil_h +#define vm_BytecodeUtil_h + +/* + * JS bytecode definitions. 
+ */ + +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/EndianUtils.h" + +#include +#include +#include + +#include "jstypes.h" +#include "NamespaceImports.h" + +#include "js/TypeDecls.h" +#include "js/Utility.h" +#include "js/Value.h" +#include "vm/BytecodeFormatFlags.h" // JOF_* +#include "vm/GeneratorResumeKind.h" +#include "vm/Opcodes.h" +#include "vm/SharedStencil.h" // js::GCThingIndex +#include "vm/ThrowMsgKind.h" // ThrowMsgKind, ThrowCondition + +namespace js { +class JS_PUBLIC_API Sprinter; +} // namespace js + +/* Shorthand for type from format. */ + +static inline uint32_t JOF_TYPE(uint32_t fmt) { return fmt & JOF_TYPEMASK; } + +/* Shorthand for mode from format. */ + +static inline uint32_t JOF_MODE(uint32_t fmt) { return fmt & JOF_MODEMASK; } + +/* + * Immediate operand getters, setters, and bounds. + */ + +static MOZ_ALWAYS_INLINE uint8_t GET_UINT8(jsbytecode* pc) { + return uint8_t(pc[1]); +} + +static MOZ_ALWAYS_INLINE void SET_UINT8(jsbytecode* pc, uint8_t u) { + pc[1] = jsbytecode(u); +} + +/* Common uint16_t immediate format helpers. */ + +static inline jsbytecode UINT16_HI(uint16_t i) { return jsbytecode(i >> 8); } + +static inline jsbytecode UINT16_LO(uint16_t i) { return jsbytecode(i); } + +static MOZ_ALWAYS_INLINE uint16_t GET_UINT16(const jsbytecode* pc) { + uint16_t result; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1); + return result; +} + +static MOZ_ALWAYS_INLINE void SET_UINT16(jsbytecode* pc, uint16_t i) { + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &i, 1); +} + +static const unsigned UINT16_LIMIT = 1 << 16; + +/* Helpers for accessing the offsets of jump opcodes. 
*/ +static const unsigned JUMP_OFFSET_LEN = 4; +static const int32_t JUMP_OFFSET_MIN = INT32_MIN; +static const int32_t JUMP_OFFSET_MAX = INT32_MAX; + +static MOZ_ALWAYS_INLINE uint32_t GET_UINT24(const jsbytecode* pc) { +#if MOZ_LITTLE_ENDIAN() + // Do a single 32-bit load (for opcode and operand), then shift off the + // opcode. + uint32_t result; + memcpy(&result, pc, 4); + return result >> 8; +#else + return uint32_t((pc[3] << 16) | (pc[2] << 8) | pc[1]); +#endif +} + +static MOZ_ALWAYS_INLINE void SET_UINT24(jsbytecode* pc, uint32_t i) { + MOZ_ASSERT(i < (1 << 24)); + +#if MOZ_LITTLE_ENDIAN() + memcpy(pc + 1, &i, 3); +#else + pc[1] = jsbytecode(i); + pc[2] = jsbytecode(i >> 8); + pc[3] = jsbytecode(i >> 16); +#endif +} + +static MOZ_ALWAYS_INLINE int8_t GET_INT8(const jsbytecode* pc) { + return int8_t(pc[1]); +} + +static MOZ_ALWAYS_INLINE uint32_t GET_UINT32(const jsbytecode* pc) { + uint32_t result; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1); + return result; +} + +static MOZ_ALWAYS_INLINE void SET_UINT32(jsbytecode* pc, uint32_t u) { + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &u, 1); +} + +static MOZ_ALWAYS_INLINE JS::Value GET_INLINE_VALUE(const jsbytecode* pc) { + uint64_t raw; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&raw, pc + 1, 1); + return JS::Value::fromRawBits(raw); +} + +static MOZ_ALWAYS_INLINE void SET_INLINE_VALUE(jsbytecode* pc, + const JS::Value& v) { + uint64_t raw = v.asRawBits(); + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &raw, 1); +} + +static MOZ_ALWAYS_INLINE int32_t GET_INT32(const jsbytecode* pc) { + return static_cast(GET_UINT32(pc)); +} + +static MOZ_ALWAYS_INLINE void SET_INT32(jsbytecode* pc, int32_t i) { + SET_UINT32(pc, static_cast(i)); +} + +static MOZ_ALWAYS_INLINE int32_t GET_JUMP_OFFSET(jsbytecode* pc) { + return GET_INT32(pc); +} + +static MOZ_ALWAYS_INLINE void SET_JUMP_OFFSET(jsbytecode* pc, int32_t off) { + SET_INT32(pc, off); +} + +static const 
unsigned GCTHING_INDEX_LEN = 4; + +static MOZ_ALWAYS_INLINE js::GCThingIndex GET_GCTHING_INDEX( + const jsbytecode* pc) { + return js::GCThingIndex(GET_UINT32(pc)); +} + +static MOZ_ALWAYS_INLINE void SET_GCTHING_INDEX(jsbytecode* pc, + js::GCThingIndex index) { + SET_UINT32(pc, index.index); +} + +// Index limit is determined by SrcNote::FourByteOffsetFlag, see +// frontend/BytecodeEmitter.h. +static const unsigned INDEX_LIMIT_LOG2 = 31; +static const uint32_t INDEX_LIMIT = uint32_t(1) << INDEX_LIMIT_LOG2; + +static inline jsbytecode ARGC_HI(uint16_t argc) { return UINT16_HI(argc); } + +static inline jsbytecode ARGC_LO(uint16_t argc) { return UINT16_LO(argc); } + +static inline uint16_t GET_ARGC(const jsbytecode* pc) { return GET_UINT16(pc); } + +static const unsigned ARGC_LIMIT = UINT16_LIMIT; + +static inline uint16_t GET_ARGNO(const jsbytecode* pc) { + return GET_UINT16(pc); +} + +static inline void SET_ARGNO(jsbytecode* pc, uint16_t argno) { + SET_UINT16(pc, argno); +} + +static const unsigned ARGNO_LEN = 2; +static const unsigned ARGNO_LIMIT = UINT16_LIMIT; + +static inline uint32_t GET_LOCALNO(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_LOCALNO(jsbytecode* pc, uint32_t varno) { + SET_UINT24(pc, varno); +} + +static const unsigned LOCALNO_LEN = 3; +static const unsigned LOCALNO_BITS = 24; +static const uint32_t LOCALNO_LIMIT = 1 << LOCALNO_BITS; + +static inline uint32_t GET_RESUMEINDEX(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_RESUMEINDEX(jsbytecode* pc, uint32_t resumeIndex) { + SET_UINT24(pc, resumeIndex); +} + +static const unsigned ICINDEX_LEN = 4; + +static inline uint32_t GET_ICINDEX(const jsbytecode* pc) { + return GET_UINT32(pc); +} + +static inline void SET_ICINDEX(jsbytecode* pc, uint32_t icIndex) { + SET_UINT32(pc, icIndex); +} + +static inline unsigned LoopHeadDepthHint(jsbytecode* pc) { + MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead); + return GET_UINT8(pc + 4); +} + +static inline 
void SetLoopHeadDepthHint(jsbytecode* pc, unsigned loopDepth) { + MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead); + uint8_t data = std::min(loopDepth, unsigned(UINT8_MAX)); + SET_UINT8(pc + 4, data); +} + +static inline bool IsBackedgePC(jsbytecode* pc) { + switch (JSOp(*pc)) { + case JSOp::Goto: + case JSOp::JumpIfTrue: + return GET_JUMP_OFFSET(pc) < 0; + default: + return false; + } +} + +static inline bool IsBackedgeForLoopHead(jsbytecode* pc, jsbytecode* loopHead) { + MOZ_ASSERT(JSOp(*loopHead) == JSOp::LoopHead); + return IsBackedgePC(pc) && pc + GET_JUMP_OFFSET(pc) == loopHead; +} + +/* + * Describes the 'hops' component of a JOF_ENVCOORD opcode. + * + * Note: this component is only 8 bits wide, limiting the maximum number of + * scopes between a use and def to roughly 255. This is a pretty small limit but + * note that SpiderMonkey's recursive descent parser can only parse about this + * many functions before hitting the C-stack recursion limit so this shouldn't + * be a significant limitation in practice. + */ + +static inline uint8_t GET_ENVCOORD_HOPS(jsbytecode* pc) { + return GET_UINT8(pc); +} + +static inline void SET_ENVCOORD_HOPS(jsbytecode* pc, uint8_t hops) { + SET_UINT8(pc, hops); +} + +static const unsigned ENVCOORD_HOPS_LEN = 1; +static const unsigned ENVCOORD_HOPS_BITS = 8; +static const unsigned ENVCOORD_HOPS_LIMIT = 1 << ENVCOORD_HOPS_BITS; + +/* Describes the 'slot' component of a JOF_ENVCOORD opcode. 
*/ +static inline uint32_t GET_ENVCOORD_SLOT(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_ENVCOORD_SLOT(jsbytecode* pc, uint32_t slot) { + SET_UINT24(pc, slot); +} + +static const unsigned ENVCOORD_SLOT_LEN = 3; +static const unsigned ENVCOORD_SLOT_BITS = 24; +static const uint32_t ENVCOORD_SLOT_LIMIT = 1 << ENVCOORD_SLOT_BITS; + +struct JSCodeSpec { + uint8_t length; /* length including opcode byte */ + int8_t nuses; /* arity, -1 if variadic */ + int8_t ndefs; /* number of stack results */ + uint32_t format; /* immediate operand format */ +}; + +namespace js { + +extern const JSCodeSpec CodeSpecTable[]; + +inline const JSCodeSpec& CodeSpec(JSOp op) { + return CodeSpecTable[uint8_t(op)]; +} + +extern const char* const CodeNameTable[]; + +inline const char* CodeName(JSOp op) { return CodeNameTable[uint8_t(op)]; } + +/* Shorthand for type from opcode. */ + +static inline uint32_t JOF_OPTYPE(JSOp op) { + return JOF_TYPE(CodeSpec(op).format); +} + +static inline bool IsJumpOpcode(JSOp op) { return JOF_OPTYPE(op) == JOF_JUMP; } + +static inline bool BytecodeFallsThrough(JSOp op) { + // Note: + // * JSOp::Yield/JSOp::Await is considered to fall through, like JSOp::Call. 
+ switch (op) { + case JSOp::Goto: + case JSOp::Default: + case JSOp::Return: + case JSOp::RetRval: + case JSOp::FinalYieldRval: + case JSOp::Throw: + case JSOp::ThrowMsg: + case JSOp::ThrowSetConst: + case JSOp::TableSwitch: + return false; + default: + return true; + } +} + +static inline bool BytecodeIsJumpTarget(JSOp op) { + switch (op) { + case JSOp::JumpTarget: + case JSOp::LoopHead: + case JSOp::AfterYield: + return true; + default: + return false; + } +} + +// The JSOp argument is superflous, but we are using it to avoid a +// store forwarding Bug on some Android phones; see Bug 1833315 +MOZ_ALWAYS_INLINE unsigned StackUses(JSOp op, jsbytecode* pc) { + MOZ_ASSERT(op == JSOp(*pc)); + int nuses = CodeSpec(op).nuses; + if (nuses >= 0) { + return nuses; + } + + MOZ_ASSERT(nuses == -1); + switch (op) { + case JSOp::PopN: + return GET_UINT16(pc); + case JSOp::New: + case JSOp::NewContent: + case JSOp::SuperCall: + return 2 + GET_ARGC(pc) + 1; + default: + /* stack: fun, this, [argc arguments] */ + MOZ_ASSERT(op == JSOp::Call || op == JSOp::CallContent || + op == JSOp::CallIgnoresRv || op == JSOp::Eval || + op == JSOp::CallIter || op == JSOp::CallContentIter || + op == JSOp::StrictEval); + return 2 + GET_ARGC(pc); + } +} + +MOZ_ALWAYS_INLINE unsigned StackDefs(JSOp op) { + int ndefs = CodeSpec(op).ndefs; + MOZ_ASSERT(ndefs >= 0); + return ndefs; +} + +#if defined(DEBUG) || defined(JS_JITSPEW) +/* + * Given bytecode address pc in script's main program code, compute the operand + * stack depth just before (JSOp) *pc executes. If *pc is not reachable, return + * false. + */ +extern bool ReconstructStackDepth(JSContext* cx, JSScript* script, + jsbytecode* pc, uint32_t* depth, + bool* reachablePC); +#endif + +} /* namespace js */ + +#define JSDVG_IGNORE_STACK 0 +#define JSDVG_SEARCH_STACK 1 + +namespace js { + +/* + * Find the source expression that resulted in v, and return a newly allocated + * C-string containing it. 
Fall back on v's string conversion (fallback) if we + * can't find the bytecode that generated and pushed v on the operand stack. + * + * Search the current stack frame if spindex is JSDVG_SEARCH_STACK. Don't + * look for v on the stack if spindex is JSDVG_IGNORE_STACK. Otherwise, + * spindex is the negative index of v, measured from cx->fp->sp, or from a + * lower frame's sp if cx->fp is native. + * + * The optional argument skipStackHits can be used to skip a hit in the stack + * frame. This can be useful in self-hosted code that wants to report value + * errors containing decompiled values that are useful for the user, instead of + * values used internally by the self-hosted code. + * + * The caller must call JS_free on the result after a successful call. + */ +UniqueChars DecompileValueGenerator(JSContext* cx, int spindex, HandleValue v, + HandleString fallback, + int skipStackHits = 0); + +/* + * Decompile the formal argument at formalIndex in the nearest non-builtin + * stack frame, falling back with converting v to source. 
+ */ +JSString* DecompileArgument(JSContext* cx, int formalIndex, HandleValue v); + +static inline unsigned GetOpLength(JSOp op) { + MOZ_ASSERT(uint8_t(op) < JSOP_LIMIT); + MOZ_ASSERT(CodeSpec(op).length > 0); + return CodeSpec(op).length; +} + +static inline unsigned GetBytecodeLength(const jsbytecode* pc) { + JSOp op = (JSOp)*pc; + return GetOpLength(op); +} + +static inline bool BytecodeIsPopped(jsbytecode* pc) { + jsbytecode* next = pc + GetBytecodeLength(pc); + return JSOp(*next) == JSOp::Pop; +} + +extern bool IsValidBytecodeOffset(JSContext* cx, JSScript* script, + size_t offset); + +inline bool IsArgOp(JSOp op) { return JOF_OPTYPE(op) == JOF_QARG; } + +inline bool IsLocalOp(JSOp op) { return JOF_OPTYPE(op) == JOF_LOCAL; } + +inline bool IsAliasedVarOp(JSOp op) { return JOF_OPTYPE(op) == JOF_ENVCOORD; } + +inline bool IsGlobalOp(JSOp op) { return CodeSpec(op).format & JOF_GNAME; } + +inline bool IsPropertySetOp(JSOp op) { + return CodeSpec(op).format & JOF_PROPSET; +} + +inline bool IsPropertyInitOp(JSOp op) { + return CodeSpec(op).format & JOF_PROPINIT; +} + +inline bool IsLooseEqualityOp(JSOp op) { + return op == JSOp::Eq || op == JSOp::Ne; +} + +inline bool IsStrictEqualityOp(JSOp op) { + return op == JSOp::StrictEq || op == JSOp::StrictNe; +} + +inline bool IsEqualityOp(JSOp op) { + return IsLooseEqualityOp(op) || IsStrictEqualityOp(op); +} + +inline bool IsRelationalOp(JSOp op) { + return op == JSOp::Lt || op == JSOp::Le || op == JSOp::Gt || op == JSOp::Ge; +} + +inline bool IsCheckStrictOp(JSOp op) { + return CodeSpec(op).format & JOF_CHECKSTRICT; +} + +inline bool IsNameOp(JSOp op) { return CodeSpec(op).format & JOF_NAME; } + +#ifdef DEBUG +inline bool IsCheckSloppyOp(JSOp op) { + return CodeSpec(op).format & JOF_CHECKSLOPPY; +} +#endif + +inline bool IsAtomOp(JSOp op) { return JOF_OPTYPE(op) == JOF_ATOM; } + +inline bool IsGetPropOp(JSOp op) { return op == JSOp::GetProp; } + +inline bool IsGetPropPC(const jsbytecode* pc) { return 
IsGetPropOp(JSOp(*pc)); } + +inline bool IsHiddenInitOp(JSOp op) { + return op == JSOp::InitHiddenProp || op == JSOp::InitHiddenElem || + op == JSOp::InitHiddenPropGetter || op == JSOp::InitHiddenElemGetter || + op == JSOp::InitHiddenPropSetter || op == JSOp::InitHiddenElemSetter; +} + +inline bool IsLockedInitOp(JSOp op) { + return op == JSOp::InitLockedProp || op == JSOp::InitLockedElem; +} + +inline bool IsStrictSetPC(jsbytecode* pc) { + JSOp op = JSOp(*pc); + return op == JSOp::StrictSetProp || op == JSOp::StrictSetName || + op == JSOp::StrictSetGName || op == JSOp::StrictSetElem; +} + +inline bool IsSetPropOp(JSOp op) { + return op == JSOp::SetProp || op == JSOp::StrictSetProp || + op == JSOp::SetName || op == JSOp::StrictSetName || + op == JSOp::SetGName || op == JSOp::StrictSetGName; +} + +inline bool IsSetPropPC(const jsbytecode* pc) { return IsSetPropOp(JSOp(*pc)); } + +inline bool IsGetElemOp(JSOp op) { return op == JSOp::GetElem; } + +inline bool IsGetElemPC(const jsbytecode* pc) { return IsGetElemOp(JSOp(*pc)); } + +inline bool IsSetElemOp(JSOp op) { + return op == JSOp::SetElem || op == JSOp::StrictSetElem; +} + +inline bool IsSetElemPC(const jsbytecode* pc) { return IsSetElemOp(JSOp(*pc)); } + +inline bool IsElemPC(const jsbytecode* pc) { + return CodeSpec(JSOp(*pc)).format & JOF_ELEM; +} + +inline bool IsInvokeOp(JSOp op) { return CodeSpec(op).format & JOF_INVOKE; } + +inline bool IsInvokePC(jsbytecode* pc) { return IsInvokeOp(JSOp(*pc)); } + +inline bool IsStrictEvalPC(jsbytecode* pc) { + JSOp op = JSOp(*pc); + return op == JSOp::StrictEval || op == JSOp::StrictSpreadEval; +} + +inline bool IsConstructOp(JSOp op) { + return CodeSpec(op).format & JOF_CONSTRUCT; +} +inline bool IsConstructPC(const jsbytecode* pc) { + return IsConstructOp(JSOp(*pc)); +} + +inline bool IsSpreadOp(JSOp op) { return CodeSpec(op).format & JOF_SPREAD; } + +inline bool IsSpreadPC(const jsbytecode* pc) { return IsSpreadOp(JSOp(*pc)); } + +inline bool 
OpUsesEnvironmentChain(JSOp op) { + return CodeSpec(op).format & JOF_USES_ENV; +} + +static inline int32_t GetBytecodeInteger(jsbytecode* pc) { + switch (JSOp(*pc)) { + case JSOp::Zero: + return 0; + case JSOp::One: + return 1; + case JSOp::Uint16: + return GET_UINT16(pc); + case JSOp::Uint24: + return GET_UINT24(pc); + case JSOp::Int8: + return GET_INT8(pc); + case JSOp::Int32: + return GET_INT32(pc); + default: + MOZ_CRASH("Bad op"); + } +} + +inline bool BytecodeOpHasIC(JSOp op) { return CodeSpec(op).format & JOF_IC; } + +inline void GetCheckPrivateFieldOperands(jsbytecode* pc, + ThrowCondition* throwCondition, + ThrowMsgKind* throwKind) { + static_assert(sizeof(ThrowCondition) == sizeof(uint8_t)); + static_assert(sizeof(ThrowMsgKind) == sizeof(uint8_t)); + + MOZ_ASSERT(JSOp(*pc) == JSOp::CheckPrivateField); + uint8_t throwConditionByte = GET_UINT8(pc); + uint8_t throwKindByte = GET_UINT8(pc + 1); + + *throwCondition = static_cast<ThrowCondition>(throwConditionByte); + *throwKind = static_cast<ThrowMsgKind>(throwKindByte); + + MOZ_ASSERT(*throwCondition == ThrowCondition::ThrowHas || + *throwCondition == ThrowCondition::ThrowHasNot || + *throwCondition == ThrowCondition::OnlyCheckRhs); + + MOZ_ASSERT(*throwKind == ThrowMsgKind::PrivateDoubleInit || + *throwKind == ThrowMsgKind::PrivateBrandDoubleInit || + *throwKind == ThrowMsgKind::MissingPrivateOnGet || + *throwKind == ThrowMsgKind::MissingPrivateOnSet); +} + +// Return true iff the combination of the ThrowCondition and hasOwn result +// will throw an exception. +static inline bool CheckPrivateFieldWillThrow(ThrowCondition condition, + bool hasOwn) { + if ((condition == ThrowCondition::ThrowHasNot && !hasOwn) || + (condition == ThrowCondition::ThrowHas && hasOwn)) { + // Met a throw condition. + return true; + } + + return false; +} + +/* + * Counts accumulated for a single opcode in a script. The counts tracked vary + * between opcodes, and this structure ensures that counts are accessed in a + * coherent fashion. 
 + */ +class PCCounts { + /* + * Offset of the pc inside the script. This fields is used to lookup opcode + * which have annotations. + */ + size_t pcOffset_; + + /* + * Record the number of execution of one instruction, or the number of + * throws executed. + */ + uint64_t numExec_; + + public: + explicit PCCounts(size_t off) : pcOffset_(off), numExec_(0) {} + + size_t pcOffset() const { return pcOffset_; } + + // Used for sorting and searching. + bool operator<(const PCCounts& rhs) const { + return pcOffset_ < rhs.pcOffset_; + } + + uint64_t& numExec() { return numExec_; } + uint64_t numExec() const { return numExec_; } + + static const char numExecName[]; +}; + +static inline jsbytecode* GetNextPc(jsbytecode* pc) { + return pc + GetBytecodeLength(pc); +} + +inline GeneratorResumeKind IntToResumeKind(int32_t value) { + MOZ_ASSERT(uint32_t(value) <= uint32_t(GeneratorResumeKind::Return)); + return static_cast<GeneratorResumeKind>(value); +} + +inline GeneratorResumeKind ResumeKindFromPC(jsbytecode* pc) { + MOZ_ASSERT(JSOp(*pc) == JSOp::ResumeKind); + return IntToResumeKind(GET_UINT8(pc)); +} + +#if defined(DEBUG) || defined(JS_JITSPEW) + +enum class DisassembleSkeptically { No, Yes }; + +/* + * Disassemblers, for debugging only. 
+ */ +[[nodiscard]] extern bool Disassemble( + JSContext* cx, JS::Handle script, bool lines, Sprinter* sp, + DisassembleSkeptically skeptically = DisassembleSkeptically::No); + +unsigned Disassemble1(JSContext* cx, JS::Handle script, + jsbytecode* pc, unsigned loc, bool lines, Sprinter* sp); + +#endif + +[[nodiscard]] extern bool DumpRealmPCCounts(JSContext* cx); + +} // namespace js + +#endif /* vm_BytecodeUtil_h */ diff --git a/js/src/vm/Caches.h b/js/src/vm/Caches.h new file mode 100644 index 0000000000..c1d9caefdb --- /dev/null +++ b/js/src/vm/Caches.h @@ -0,0 +1,568 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_Caches_h +#define vm_Caches_h + +#include "mozilla/Array.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Maybe.h" +#include "mozilla/MruCache.h" +#include "mozilla/TemplateLib.h" +#include "mozilla/UniquePtr.h" + +#include "frontend/ScopeBindingCache.h" +#include "gc/Tracer.h" +#include "js/RootingAPI.h" +#include "js/TypeDecls.h" +#include "vm/JSScript.h" +#include "vm/Shape.h" +#include "vm/StencilCache.h" // js::StencilCache +#include "vm/StringType.h" + +namespace js { + +class SrcNote; + +/* + * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a + * given pc in a script. We use the script->code pointer to tag the cache, + * instead of the script address itself, so that source notes are always found + * by offset from the bytecode with which they were generated. 
+ */ +struct GSNCache { + typedef HashMap, + SystemAllocPolicy> + Map; + + jsbytecode* code; + Map map; + + GSNCache() : code(nullptr) {} + + void purge(); +}; + +struct EvalCacheEntry { + JSLinearString* str; + JSScript* script; + JSScript* callerScript; + jsbytecode* pc; + + // We sweep this cache after a nursery collection to update entries with + // string keys that have been tenured. + // + // The entire cache is purged on a major GC, so we don't need to sweep it + // then. + bool traceWeak(JSTracer* trc) { + MOZ_ASSERT(trc->kind() == JS::TracerKind::MinorSweeping); + return TraceManuallyBarrieredWeakEdge(trc, &str, "EvalCacheEntry::str"); + } +}; + +struct EvalCacheLookup { + explicit EvalCacheLookup(JSContext* cx) : str(cx), callerScript(cx) {} + Rooted str; + RootedScript callerScript; + MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc; +}; + +struct EvalCacheHashPolicy { + using Lookup = EvalCacheLookup; + + static HashNumber hash(const Lookup& l); + static bool match(const EvalCacheEntry& entry, const EvalCacheLookup& l); +}; + +using EvalCache = + GCHashSet; + +class MegamorphicCacheEntry { + // Receiver object's shape. + Shape* shape_ = nullptr; + + // The atom or symbol property being accessed. + PropertyKey key_; + + // Slot offset and isFixedSlot flag of the data property. + TaggedSlotOffset slotOffset_; + + // This entry is valid iff the generation matches the cache's generation. + uint16_t generation_ = 0; + + // Number of hops on the proto chain to get to the holder object. If this is + // zero, the property exists on the receiver object. It can also be one of + // the sentinel values indicating a missing property lookup. 
+ uint8_t numHops_ = 0; + + friend class MegamorphicCache; + + public: + static constexpr uint8_t MaxHopsForDataProperty = UINT8_MAX - 2; + static constexpr uint8_t NumHopsForMissingProperty = UINT8_MAX - 1; + static constexpr uint8_t NumHopsForMissingOwnProperty = UINT8_MAX; + + void init(Shape* shape, PropertyKey key, uint16_t generation, uint8_t numHops, + TaggedSlotOffset slotOffset) { + shape_ = shape; + key_ = key; + slotOffset_ = slotOffset; + generation_ = generation; + numHops_ = numHops; + MOZ_ASSERT(numHops_ == numHops, "numHops must fit in numHops_"); + } + bool isMissingProperty() const { + return numHops_ == NumHopsForMissingProperty; + } + bool isMissingOwnProperty() const { + return numHops_ == NumHopsForMissingOwnProperty; + } + bool isDataProperty() const { return numHops_ <= MaxHopsForDataProperty; } + uint16_t numHops() const { + MOZ_ASSERT(isDataProperty()); + return numHops_; + } + TaggedSlotOffset slotOffset() const { + MOZ_ASSERT(isDataProperty()); + return slotOffset_; + } + + static constexpr size_t offsetOfShape() { + return offsetof(MegamorphicCacheEntry, shape_); + } + + static constexpr size_t offsetOfKey() { + return offsetof(MegamorphicCacheEntry, key_); + } + + static constexpr size_t offsetOfGeneration() { + return offsetof(MegamorphicCacheEntry, generation_); + } + + static constexpr size_t offsetOfSlotOffset() { + return offsetof(MegamorphicCacheEntry, slotOffset_); + } + + static constexpr size_t offsetOfNumHops() { + return offsetof(MegamorphicCacheEntry, numHops_); + } +}; + +// [SMDOC] Megamorphic Property Lookup Cache (MegamorphicCache) +// +// MegamorphicCache is a data structure used to speed up megamorphic property +// lookups from JIT code. The same cache is currently used for both GetProp and +// HasProp (in, hasOwnProperty) operations. +// +// This is implemented as a fixed-size array of entries. Lookups are performed +// based on the receiver object's Shape + PropertyKey. 
If found in the cache, +// the result of a lookup represents either: +// +// * A data property on the receiver or on its proto chain (stored as number of +// 'hops' up the proto chain + the slot of the data property). +// +// * A missing property on the receiver or its proto chain. +// +// * A missing property on the receiver, but it might exist on the proto chain. +// This lets us optimize hasOwnProperty better. +// +// Collisions are handled by simply overwriting the previous entry stored in the +// slot. This is sufficient to achieve a high hit rate on typical web workloads +// while ensuring cache lookups are always fast and simple. +// +// Lookups always check the receiver object's shape (ensuring the properties and +// prototype are unchanged). Because the cache also caches lookups on the proto +// chain, Watchtower is used to invalidate the cache when prototype objects are +// mutated. This is done by incrementing the cache's generation counter to +// invalidate all entries. +// +// The cache is also invalidated on each major GC. +class MegamorphicCache { + public: + using Entry = MegamorphicCacheEntry; + + static constexpr size_t NumEntries = 1024; + static constexpr uint8_t ShapeHashShift1 = + mozilla::tl::FloorLog2::value; + static constexpr uint8_t ShapeHashShift2 = + ShapeHashShift1 + mozilla::tl::FloorLog2::value; + + static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) && + mozilla::IsPowerOfTwo(NumEntries), + "FloorLog2 is exact because alignof(Shape) and NumEntries are " + "both powers of two"); + + private: + mozilla::Array entries_; + + // Generation counter used to invalidate all entries. 
+ uint16_t generation_ = 0; + + // NOTE: this logic is mirrored in MacroAssembler::emitMegamorphicCacheLookup + Entry& getEntry(Shape* shape, PropertyKey key) { + static_assert(mozilla::IsPowerOfTwo(NumEntries), + "NumEntries must be a power-of-two for fast modulo"); + uintptr_t hash = uintptr_t(shape) >> ShapeHashShift1; + hash ^= uintptr_t(shape) >> ShapeHashShift2; + hash += HashAtomOrSymbolPropertyKey(key); + return entries_[hash % NumEntries]; + } + + public: + void bumpGeneration() { + generation_++; + if (generation_ == 0) { + // Generation overflowed. Invalidate the whole cache. + for (size_t i = 0; i < NumEntries; i++) { + entries_[i].shape_ = nullptr; + } + } + } + bool lookup(Shape* shape, PropertyKey key, Entry** entryp) { + Entry& entry = getEntry(shape, key); + *entryp = &entry; + return (entry.shape_ == shape && entry.key_ == key && + entry.generation_ == generation_); + } + void initEntryForMissingProperty(Entry* entry, Shape* shape, + PropertyKey key) { + entry->init(shape, key, generation_, Entry::NumHopsForMissingProperty, + TaggedSlotOffset()); + } + void initEntryForMissingOwnProperty(Entry* entry, Shape* shape, + PropertyKey key) { + entry->init(shape, key, generation_, Entry::NumHopsForMissingOwnProperty, + TaggedSlotOffset()); + } + void initEntryForDataProperty(Entry* entry, Shape* shape, PropertyKey key, + size_t numHops, TaggedSlotOffset slotOffset) { + if (numHops > Entry::MaxHopsForDataProperty) { + return; + } + entry->init(shape, key, generation_, numHops, slotOffset); + } + + static constexpr size_t offsetOfEntries() { + return offsetof(MegamorphicCache, entries_); + } + + static constexpr size_t offsetOfGeneration() { + return offsetof(MegamorphicCache, generation_); + } +}; + +class MegamorphicSetPropCacheEntry { + Shape* beforeShape_ = nullptr; + Shape* afterShape_ = nullptr; + + // The atom or symbol property being accessed. + PropertyKey key_; + + // Slot offset and isFixedSlot flag of the data property. 
+ TaggedSlotOffset slotOffset_; + + // If slots need to be grown, this is the new capacity we need. + uint16_t newCapacity_ = 0; + + // This entry is valid iff the generation matches the cache's generation. + uint16_t generation_ = 0; + + friend class MegamorphicSetPropCache; + + public: + void init(Shape* beforeShape, Shape* afterShape, PropertyKey key, + uint16_t generation, TaggedSlotOffset slotOffset, + uint16_t newCapacity) { + beforeShape_ = beforeShape; + afterShape_ = afterShape; + key_ = key; + slotOffset_ = slotOffset; + newCapacity_ = newCapacity; + generation_ = generation; + } + TaggedSlotOffset slotOffset() const { return slotOffset_; } + Shape* afterShape() const { return afterShape_; } + + static constexpr size_t offsetOfShape() { + return offsetof(MegamorphicSetPropCacheEntry, beforeShape_); + } + static constexpr size_t offsetOfAfterShape() { + return offsetof(MegamorphicSetPropCacheEntry, afterShape_); + } + + static constexpr size_t offsetOfKey() { + return offsetof(MegamorphicSetPropCacheEntry, key_); + } + + static constexpr size_t offsetOfNewCapacity() { + return offsetof(MegamorphicSetPropCacheEntry, newCapacity_); + } + + static constexpr size_t offsetOfGeneration() { + return offsetof(MegamorphicSetPropCacheEntry, generation_); + } + + static constexpr size_t offsetOfSlotOffset() { + return offsetof(MegamorphicSetPropCacheEntry, slotOffset_); + } +}; + +class MegamorphicSetPropCache { + public: + using Entry = MegamorphicSetPropCacheEntry; + // We can get more hits if we increase this, but this seems to be around + // the sweet spot where we are getting most of the hits we would get with + // an infinitely sized cache + static constexpr size_t NumEntries = 1024; + static constexpr uint8_t ShapeHashShift1 = + mozilla::tl::FloorLog2::value; + static constexpr uint8_t ShapeHashShift2 = + ShapeHashShift1 + mozilla::tl::FloorLog2::value; + + static_assert(mozilla::IsPowerOfTwo(alignof(Shape)) && + mozilla::IsPowerOfTwo(NumEntries), + "FloorLog2 
is exact because alignof(Shape) and NumEntries are " + "both powers of two"); + + private: + mozilla::Array entries_; + + // Generation counter used to invalidate all entries. + uint16_t generation_ = 0; + + Entry& getEntry(Shape* beforeShape, PropertyKey key) { + static_assert(mozilla::IsPowerOfTwo(NumEntries), + "NumEntries must be a power-of-two for fast modulo"); + uintptr_t hash = uintptr_t(beforeShape) >> ShapeHashShift1; + hash ^= uintptr_t(beforeShape) >> ShapeHashShift2; + hash += HashAtomOrSymbolPropertyKey(key); + return entries_[hash % NumEntries]; + } + + public: + void bumpGeneration() { + generation_++; + if (generation_ == 0) { + // Generation overflowed. Invalidate the whole cache. + for (size_t i = 0; i < NumEntries; i++) { + entries_[i].beforeShape_ = nullptr; + } + } + } + void set(Shape* beforeShape, Shape* afterShape, PropertyKey key, + TaggedSlotOffset slotOffset, uint32_t newCapacity) { + uint16_t newSlots = (uint16_t)newCapacity; + if (newSlots != newCapacity) { + return; + } + Entry& entry = getEntry(beforeShape, key); + entry.init(beforeShape, afterShape, key, generation_, slotOffset, newSlots); + } + +#ifdef DEBUG + bool lookup(Shape* beforeShape, PropertyKey key, Entry** entryp) { + Entry& entry = getEntry(beforeShape, key); + *entryp = &entry; + return (entry.beforeShape_ == beforeShape && entry.key_ == key && + entry.generation_ == generation_); + } +#endif + + static constexpr size_t offsetOfEntries() { + return offsetof(MegamorphicSetPropCache, entries_); + } + + static constexpr size_t offsetOfGeneration() { + return offsetof(MegamorphicSetPropCache, generation_); + } +}; + +// Cache for AtomizeString, mapping JSString* or JS::Latin1Char* to the +// corresponding JSAtom*. The cache has three different optimizations: +// +// * The two most recent lookups are cached. This has a hit rate of 30-65% on +// typical web workloads. +// +// * MruCache is used for short JS::Latin1Char strings. 
+// +// * For longer strings, there's also a JSLinearString* => JSAtom* HashMap, +// because hashing the string characters repeatedly can be slow. +// This map is also used by nursery GC to de-duplicate strings to atoms. +// +// This cache is purged on minor and major GC. +class StringToAtomCache { + public: + struct LastLookup { + JSString* string = nullptr; + JSAtom* atom = nullptr; + + static constexpr size_t offsetOfString() { + return offsetof(LastLookup, string); + } + + static constexpr size_t offsetOfAtom() { + return offsetof(LastLookup, atom); + } + }; + static constexpr size_t NumLastLookups = 2; + + struct AtomTableKey { + explicit AtomTableKey(const JS::Latin1Char* str, size_t len) + : string_(str), length_(len) { + hash_ = mozilla::HashString(string_, length_); + } + + const JS::Latin1Char* string_; + size_t length_; + uint32_t hash_; + }; + + private: + struct RopeAtomCache + : public mozilla::MruCache { + static HashNumber Hash(const AtomTableKey& key) { return key.hash_; } + static bool Match(const AtomTableKey& key, const JSAtom* val) { + JS::AutoCheckCannotGC nogc; + return val->length() == key.length_ && + EqualChars(key.string_, val->latin1Chars(nogc), key.length_); + } + }; + using Map = + HashMap, SystemAllocPolicy>; + Map map_; + mozilla::Array lastLookups_; + RopeAtomCache ropeCharCache_; + + public: + // Don't use the HashMap for short strings. Hashing them is less expensive. + // But the length needs to long enough to cover common identifiers in React. + static constexpr size_t MinStringLength = 39; + + JSAtom* lookupInMap(JSString* s) const { + MOZ_ASSERT(s->inStringToAtomCache()); + MOZ_ASSERT(s->length() >= MinStringLength); + + auto p = map_.lookup(s); + JSAtom* atom = p ? 
p->value() : nullptr; + return atom; + } + + MOZ_ALWAYS_INLINE JSAtom* lookup(JSString* s) const { + MOZ_ASSERT(!s->isAtom()); + for (const LastLookup& entry : lastLookups_) { + if (entry.string == s) { + return entry.atom; + } + } + + if (!s->inStringToAtomCache()) { + MOZ_ASSERT(!map_.lookup(s)); + return nullptr; + } + + return lookupInMap(s); + } + + MOZ_ALWAYS_INLINE JSAtom* lookupWithRopeChars( + const JS::Latin1Char* str, size_t len, + mozilla::Maybe& key) { + MOZ_ASSERT(len < MinStringLength); + key.emplace(str, len); + if (auto p = ropeCharCache_.Lookup(key.value())) { + return p.Data(); + } + return nullptr; + } + + static constexpr size_t offsetOfLastLookups() { + return offsetof(StringToAtomCache, lastLookups_); + } + + void maybePut(JSString* s, JSAtom* atom, mozilla::Maybe& key) { + if (key.isSome()) { + ropeCharCache_.Put(key.value(), atom); + } + + for (size_t i = NumLastLookups - 1; i > 0; i--) { + lastLookups_[i] = lastLookups_[i - 1]; + } + lastLookups_[0].string = s; + lastLookups_[0].atom = atom; + + if (s->length() < MinStringLength) { + return; + } + if (!map_.putNew(s, atom)) { + return; + } + s->setInStringToAtomCache(); + } + + void purge() { + map_.clearAndCompact(); + for (LastLookup& entry : lastLookups_) { + entry.string = nullptr; + entry.atom = nullptr; + } + + ropeCharCache_.Clear(); + } +}; + +class RuntimeCaches { + public: + MegamorphicCache megamorphicCache; + UniquePtr megamorphicSetPropCache; + GSNCache gsnCache; + UncompressedSourceCache uncompressedSourceCache; + EvalCache evalCache; + StringToAtomCache stringToAtomCache; + + // Delazification: Cache binding for runtime objects which are used during + // delazification to quickly resolve NameLocation of bindings without linearly + // iterating over the list of bindings. + frontend::RuntimeScopeBindingCache scopeCache; + + // This cache is used to store the result of delazification compilations which + // might be happening off-thread. 
The main-thread will concurrently read the + // content of this cache to avoid delazification, or fallback on running the + // delazification on the main-thread. + // + // Main-thread results are not stored in the StencilCache as there is no other + // consumer. + StencilCache delazificationCache; + + void sweepAfterMinorGC(JSTracer* trc) { evalCache.traceWeak(trc); } +#ifdef JSGC_HASH_TABLE_CHECKS + void checkEvalCacheAfterMinorGC(); +#endif + + void purgeForCompaction() { + evalCache.clear(); + stringToAtomCache.purge(); + megamorphicCache.bumpGeneration(); + if (megamorphicSetPropCache) { + // MegamorphicSetPropCache can be null if we failed out of + // JSRuntime::init. We will then try to destroy the runtime which will + // do a GC and land us here. + megamorphicSetPropCache->bumpGeneration(); + } + scopeCache.purge(); + } + + void purgeStencils() { delazificationCache.clearAndDisable(); } + + void purge() { + purgeForCompaction(); + gsnCache.purge(); + uncompressedSourceCache.purge(); + purgeStencils(); + } +}; + +} // namespace js + +#endif /* vm_Caches_h */ diff --git a/js/src/vm/CallAndConstruct.cpp b/js/src/vm/CallAndConstruct.cpp new file mode 100644 index 0000000000..be714e3dd0 --- /dev/null +++ b/js/src/vm/CallAndConstruct.cpp @@ -0,0 +1,168 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "js/CallAndConstruct.h" + +#include "mozilla/Assertions.h" // MOZ_ASSERT + +#include "jstypes.h" // JS_PUBLIC_API +#include "gc/Zone.h" // js::Zone +#include "js/Context.h" // AssertHeapIsIdle +#include "js/friend/ErrorMessages.h" // JSMSG_* +#include "js/RootingAPI.h" // JS::Rooted, JS::Handle, JS::MutableHandle +#include "js/Value.h" // JS::Value, JS::*Value +#include "js/ValueArray.h" // JS::HandleValueArray +#include "vm/BytecodeUtil.h" // JSDVG_IGNORE_STACK +#include "vm/Interpreter.h" // js::Call, js::Construct +#include "vm/JSAtom.h" // JSAtom, js::Atomize +#include "vm/JSContext.h" // JSContext, CHECK_THREAD, ReportValueError +#include "vm/JSObject.h" // JSObject +#include "vm/Stack.h" // js::InvokeArgs, js::FillArgumentsFromArraylike, js::ConstructArgs + +#include "vm/JSContext-inl.h" // JSContext::check +#include "vm/JSObject-inl.h" // js::IsConstructor +#include "vm/ObjectOperations-inl.h" // js::GetProperty + +using namespace js; + +JS_PUBLIC_API bool JS::IsCallable(JSObject* obj) { return obj->isCallable(); } + +JS_PUBLIC_API bool JS::IsConstructor(JSObject* obj) { + return obj->isConstructor(); +} + +JS_PUBLIC_API bool JS_CallFunctionValue(JSContext* cx, + JS::Handle obj, + JS::Handle fval, + const JS::HandleValueArray& args, + JS::MutableHandle rval) { + MOZ_ASSERT(!cx->zone()->isAtomsZone()); + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj, fval, args); + + js::InvokeArgs iargs(cx); + if (!FillArgumentsFromArraylike(cx, iargs, args)) { + return false; + } + + JS::Rooted thisv(cx, JS::ObjectOrNullValue(obj)); + return js::Call(cx, fval, thisv, iargs, rval); +} + +JS_PUBLIC_API bool JS_CallFunction(JSContext* cx, JS::Handle obj, + JS::Handle fun, + const JS::HandleValueArray& args, + JS::MutableHandle rval) { + MOZ_ASSERT(!cx->zone()->isAtomsZone()); + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj, fun, args); + + js::InvokeArgs iargs(cx); + if (!FillArgumentsFromArraylike(cx, iargs, args)) { + return false; + } + + 
JS::Rooted fval(cx, JS::ObjectValue(*fun)); + JS::Rooted thisv(cx, JS::ObjectOrNullValue(obj)); + return js::Call(cx, fval, thisv, iargs, rval); +} + +JS_PUBLIC_API bool JS_CallFunctionName(JSContext* cx, JS::Handle obj, + const char* name, + const JS::HandleValueArray& args, + JS::MutableHandle rval) { + MOZ_ASSERT(!cx->zone()->isAtomsZone()); + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj, args); + + JSAtom* atom = Atomize(cx, name, strlen(name)); + if (!atom) { + return false; + } + + JS::Rooted fval(cx); + JS::Rooted id(cx, AtomToId(atom)); + if (!GetProperty(cx, obj, obj, id, &fval)) { + return false; + } + + js::InvokeArgs iargs(cx); + if (!FillArgumentsFromArraylike(cx, iargs, args)) { + return false; + } + + JS::Rooted thisv(cx, JS::ObjectOrNullValue(obj)); + return js::Call(cx, fval, thisv, iargs, rval); +} + +JS_PUBLIC_API bool JS::Call(JSContext* cx, JS::Handle thisv, + JS::Handle fval, + const JS::HandleValueArray& args, + JS::MutableHandle rval) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(thisv, fval, args); + + js::InvokeArgs iargs(cx); + if (!FillArgumentsFromArraylike(cx, iargs, args)) { + return false; + } + + return js::Call(cx, fval, thisv, iargs, rval); +} + +JS_PUBLIC_API bool JS::Construct(JSContext* cx, JS::Handle fval, + JS::Handle newTarget, + const JS::HandleValueArray& args, + JS::MutableHandle objp) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(fval, newTarget, args); + + if (!js::IsConstructor(fval)) { + ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval, + nullptr); + return false; + } + + JS::Rooted newTargetVal(cx, JS::ObjectValue(*newTarget)); + if (!js::IsConstructor(newTargetVal)) { + ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, + newTargetVal, nullptr); + return false; + } + + js::ConstructArgs cargs(cx); + if (!FillArgumentsFromArraylike(cx, cargs, args)) { + return false; + } + + return js::Construct(cx, fval, cargs, newTargetVal, objp); +} + +JS_PUBLIC_API 
bool JS::Construct(JSContext* cx, JS::Handle fval, + const JS::HandleValueArray& args, + JS::MutableHandle objp) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(fval, args); + + if (!js::IsConstructor(fval)) { + ReportValueError(cx, JSMSG_NOT_CONSTRUCTOR, JSDVG_IGNORE_STACK, fval, + nullptr); + return false; + } + + js::ConstructArgs cargs(cx); + if (!FillArgumentsFromArraylike(cx, cargs, args)) { + return false; + } + + return js::Construct(cx, fval, cargs, fval, objp); +} diff --git a/js/src/vm/CallNonGenericMethod.cpp b/js/src/vm/CallNonGenericMethod.cpp new file mode 100644 index 0000000000..2164f7162b --- /dev/null +++ b/js/src/vm/CallNonGenericMethod.cpp @@ -0,0 +1,35 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "js/CallNonGenericMethod.h" + +#include "proxy/Proxy.h" +#include "vm/JSFunction.h" +#include "vm/JSObject.h" +#include "vm/ProxyObject.h" +#include "vm/SelfHosting.h" + +using namespace js; + +bool JS::detail::CallMethodIfWrapped(JSContext* cx, IsAcceptableThis test, + NativeImpl impl, const CallArgs& args) { + HandleValue thisv = args.thisv(); + MOZ_ASSERT(!test(thisv)); + + if (thisv.isObject()) { + JSObject& thisObj = args.thisv().toObject(); + if (thisObj.is()) { + return Proxy::nativeCall(cx, test, impl, args); + } + } + + if (IsCallSelfHostedNonGenericMethod(impl)) { + return ReportIncompatibleSelfHostedMethod(cx, thisv); + } + + ReportIncompatible(cx, args); + return false; +} diff --git a/js/src/vm/CharacterEncoding.cpp b/js/src/vm/CharacterEncoding.cpp new file mode 100644 index 0000000000..52edcae45e --- /dev/null +++ b/js/src/vm/CharacterEncoding.cpp @@ -0,0 +1,888 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "js/CharacterEncoding.h" + +#include "mozilla/CheckedInt.h" +#include "mozilla/DebugOnly.h" +#include "mozilla/Latin1.h" +#include "mozilla/Maybe.h" +#include "mozilla/Range.h" +#include "mozilla/Span.h" +#include "mozilla/Sprintf.h" +#include "mozilla/TextUtils.h" +#include "mozilla/Utf8.h" + +#ifndef XP_LINUX +// We still support libstd++ versions without codecvt support on Linux. 
+# include +#endif +#include +#include +#include +#include + +#include "frontend/FrontendContext.h" +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "util/StringBuffer.h" +#include "util/Unicode.h" // unicode::REPLACEMENT_CHARACTER +#include "vm/JSContext.h" + +using mozilla::AsChars; +using mozilla::AsciiValidUpTo; +using mozilla::AsWritableChars; +using mozilla::ConvertLatin1toUtf8Partial; +using mozilla::ConvertUtf16toUtf8Partial; +using mozilla::IsAscii; +using mozilla::IsUtf8Latin1; +using mozilla::LossyConvertUtf16toLatin1; +using mozilla::Span; +using mozilla::Utf8Unit; + +using JS::Latin1CharsZ; +using JS::TwoByteCharsZ; +using JS::UTF8Chars; +using JS::UTF8CharsZ; + +using namespace js; +using namespace js::unicode; + +Latin1CharsZ JS::LossyTwoByteCharsToNewLatin1CharsZ( + JSContext* cx, const mozilla::Range tbchars) { + MOZ_ASSERT(cx); + size_t len = tbchars.length(); + unsigned char* latin1 = cx->pod_malloc(len + 1); + if (!latin1) { + return Latin1CharsZ(); + } + LossyConvertUtf16toLatin1(tbchars, AsWritableChars(Span(latin1, len))); + latin1[len] = '\0'; + return Latin1CharsZ(latin1, len); +} + +template +static size_t GetDeflatedUTF8StringLength(const CharT* chars, size_t nchars) { + size_t nbytes = nchars; + for (const CharT* end = chars + nchars; chars < end; chars++) { + char16_t c = *chars; + if (c < 0x80) { + continue; + } + char32_t v; + if (IsSurrogate(c)) { + /* nbytes sets 1 length since this is surrogate pair. 
*/ + if (IsTrailSurrogate(c) || (chars + 1) == end) { + nbytes += 2; /* Bad Surrogate */ + continue; + } + char16_t c2 = chars[1]; + if (!IsTrailSurrogate(c2)) { + nbytes += 2; /* Bad Surrogate */ + continue; + } + v = UTF16Decode(c, c2); + nbytes--; + chars++; + } else { + v = c; + } + v >>= 11; + nbytes++; + while (v) { + v >>= 5; + nbytes++; + } + } + return nbytes; +} + +JS_PUBLIC_API size_t JS::GetDeflatedUTF8StringLength(JSLinearString* s) { + JS::AutoCheckCannotGC nogc; + return s->hasLatin1Chars() + ? ::GetDeflatedUTF8StringLength(s->latin1Chars(nogc), s->length()) + : ::GetDeflatedUTF8StringLength(s->twoByteChars(nogc), + s->length()); +} + +JS_PUBLIC_API size_t JS::DeflateStringToUTF8Buffer(JSLinearString* src, + mozilla::Span dst) { + JS::AutoCheckCannotGC nogc; + if (src->hasLatin1Chars()) { + auto source = AsChars(Span(src->latin1Chars(nogc), src->length())); + auto [read, written] = ConvertLatin1toUtf8Partial(source, dst); + (void)read; + return written; + } + auto source = Span(src->twoByteChars(nogc), src->length()); + auto [read, written] = ConvertUtf16toUtf8Partial(source, dst); + (void)read; + return written; +} + +template +void ConvertToUTF8(mozilla::Span src, mozilla::Span dst); + +template <> +void ConvertToUTF8(mozilla::Span src, + mozilla::Span dst) { + (void)ConvertUtf16toUtf8Partial(src, dst); +} + +template <> +void ConvertToUTF8(mozilla::Span src, + mozilla::Span dst) { + (void)ConvertLatin1toUtf8Partial(AsChars(src), dst); +} + +template +UTF8CharsZ JS::CharsToNewUTF8CharsZ(Allocator* alloc, + const mozilla::Range chars) { + /* Get required buffer size. */ + const CharT* str = chars.begin().get(); + size_t len = ::GetDeflatedUTF8StringLength(str, chars.length()); + + /* Allocate buffer. */ + char* utf8 = alloc->template pod_malloc(len + 1); + if (!utf8) { + return UTF8CharsZ(); + } + + /* Encode to UTF8. 
*/ + ::ConvertToUTF8(Span(str, chars.length()), Span(utf8, len)); + utf8[len] = '\0'; + + return UTF8CharsZ(utf8, len); +} + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + FrontendAllocator* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + FrontendAllocator* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + FrontendAllocator* cx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + FrontendAllocator* cx, const mozilla::Range chars); + +static constexpr uint32_t INVALID_UTF8 = std::numeric_limits::max(); + +/* + * Convert a UTF-8 character sequence into a UCS-4 character and return that + * character. It is assumed that the caller already checked that the sequence + * is valid. 
+ */ +static char32_t Utf8ToOneUcs4CharImpl(const uint8_t* utf8Buffer, + int utf8Length) { + MOZ_ASSERT(1 <= utf8Length && utf8Length <= 4); + + if (utf8Length == 1) { + MOZ_ASSERT(!(*utf8Buffer & 0x80)); + return *utf8Buffer; + } + + /* from Unicode 3.1, non-shortest form is illegal */ + static const char32_t minucs4Table[] = {0x80, 0x800, NonBMPMin}; + + MOZ_ASSERT((*utf8Buffer & (0x100 - (1 << (7 - utf8Length)))) == + (0x100 - (1 << (8 - utf8Length)))); + char32_t ucs4Char = *utf8Buffer++ & ((1 << (7 - utf8Length)) - 1); + char32_t minucs4Char = minucs4Table[utf8Length - 2]; + while (--utf8Length) { + MOZ_ASSERT((*utf8Buffer & 0xC0) == 0x80); + ucs4Char = (ucs4Char << 6) | (*utf8Buffer++ & 0x3F); + } + + if (MOZ_UNLIKELY(ucs4Char < minucs4Char)) { + return INVALID_UTF8; + } + + if (MOZ_UNLIKELY(IsSurrogate(ucs4Char))) { + return INVALID_UTF8; + } + + return ucs4Char; +} + +char32_t JS::Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length) { + return Utf8ToOneUcs4CharImpl(utf8Buffer, utf8Length); +} + +static void ReportInvalidCharacter(JSContext* cx, uint32_t offset) { + char buffer[10]; + SprintfLiteral(buffer, "%u", offset); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_MALFORMED_UTF8_CHAR, buffer); +} + +static void ReportBufferTooSmall(JSContext* cx, uint32_t dummy) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); +} + +static void ReportTooBigCharacter(JSContext* cx, uint32_t v) { + char buffer[11]; + SprintfLiteral(buffer, "0x%x", v); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_UTF8_CHAR_TOO_LARGE, buffer); +} + +enum class LoopDisposition { + Break, + Continue, +}; + +enum class OnUTF8Error { + InsertReplacementCharacter, + InsertQuestionMark, + Throw, + Crash, +}; + +// Scan UTF-8 input and (internally, at least) convert it to a series of UTF-16 +// code units. 
But you can also do odd things like pass an empty lambda for +// `dst`, in which case the output is discarded entirely--the only effect of +// calling the template that way is error-checking. +template +static bool InflateUTF8ToUTF16(JSContext* cx, const UTF8Chars src, + OutputFn dst) { + size_t srclen = src.length(); + for (uint32_t i = 0; i < srclen; i++) { + uint32_t v = uint32_t(src[i]); + if (!(v & 0x80)) { + // ASCII code unit. Simple copy. + if (dst(uint16_t(v)) == LoopDisposition::Break) { + break; + } + } else { + // Non-ASCII code unit. Determine its length in bytes (n). + uint32_t n = 1; + while (v & (0x80 >> n)) { + n++; + } + +#define INVALID(report, arg, n2) \ + do { \ + if (ErrorAction == OnUTF8Error::Throw) { \ + report(cx, arg); \ + return false; \ + } else if (ErrorAction == OnUTF8Error::Crash) { \ + MOZ_CRASH("invalid UTF-8 string: " #report); \ + } else { \ + char16_t replacement; \ + if (ErrorAction == OnUTF8Error::InsertReplacementCharacter) { \ + replacement = REPLACEMENT_CHARACTER; \ + } else { \ + MOZ_ASSERT(ErrorAction == OnUTF8Error::InsertQuestionMark); \ + replacement = '?'; \ + } \ + if (dst(replacement) == LoopDisposition::Break) { \ + break; \ + } \ + n = n2; \ + goto invalidMultiByteCodeUnit; \ + } \ + } while (0) + + // Check the leading byte. + if (n < 2 || n > 4) { + INVALID(ReportInvalidCharacter, i, 1); + } + + // Check that |src| is large enough to hold an n-byte code unit. + if (i + n > srclen) { + INVALID(ReportBufferTooSmall, /* dummy = */ 0, 1); + } + + // Check the second byte. From Unicode Standard v6.2, Table 3-7 + // Well-Formed UTF-8 Byte Sequences. + if ((v == 0xE0 && ((uint8_t)src[i + 1] & 0xE0) != 0xA0) || // E0 A0~BF + (v == 0xED && ((uint8_t)src[i + 1] & 0xE0) != 0x80) || // ED 80~9F + (v == 0xF0 && ((uint8_t)src[i + 1] & 0xF0) == 0x80) || // F0 90~BF + (v == 0xF4 && ((uint8_t)src[i + 1] & 0xF0) != 0x80)) // F4 80~8F + { + INVALID(ReportInvalidCharacter, i, 1); + } + + // Check the continuation bytes. 
+ for (uint32_t m = 1; m < n; m++) { + if ((src[i + m] & 0xC0) != 0x80) { + INVALID(ReportInvalidCharacter, i, m); + } + } + + // Determine the code unit's length in CharT and act accordingly. + v = Utf8ToOneUcs4CharImpl((uint8_t*)&src[i], n); + if (v < NonBMPMin) { + // The n-byte UTF8 code unit will fit in a single CharT. + if (dst(char16_t(v)) == LoopDisposition::Break) { + break; + } + } else if (v <= NonBMPMax) { + // The n-byte UTF8 code unit will fit in two CharT units. + if (dst(LeadSurrogate(v)) == LoopDisposition::Break) { + break; + } + if (dst(TrailSurrogate(v)) == LoopDisposition::Break) { + break; + } + } else { + // The n-byte UTF8 code unit won't fit in two CharT units. + INVALID(ReportTooBigCharacter, v, 1); + } + + invalidMultiByteCodeUnit: + // Move i to the last byte of the multi-byte code unit; the loop + // header will do the final i++ to move to the start of the next + // code unit. + i += n - 1; + } + } + + return true; +} + +template +static void CopyAndInflateUTF8IntoBuffer(JSContext* cx, const UTF8Chars src, + CharT* dst, size_t outlen, + bool allASCII) { + if (allASCII) { + size_t srclen = src.length(); + MOZ_ASSERT(outlen == srclen); + for (uint32_t i = 0; i < srclen; i++) { + dst[i] = CharT(src[i]); + } + } else { + size_t j = 0; + auto push = [dst, &j](char16_t c) -> LoopDisposition { + dst[j++] = CharT(c); + return LoopDisposition::Continue; + }; + MOZ_ALWAYS_TRUE((InflateUTF8ToUTF16(cx, src, push))); + MOZ_ASSERT(j == outlen); + } +} + +template +static CharsT InflateUTF8StringHelper(JSContext* cx, const UTF8Chars src, + size_t* outlen, arena_id_t destArenaId) { + using CharT = typename CharsT::CharT; + static_assert( + std::is_same_v || std::is_same_v, + "bad CharT"); + + *outlen = 0; + + size_t len = 0; + bool allASCII = true; + auto count = [&len, &allASCII](char16_t c) -> LoopDisposition { + len++; + allASCII &= (c < 0x80); + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(cx, src, count)) { + return CharsT(); + 
} + *outlen = len; + + CharT* dst = cx->pod_arena_malloc(destArenaId, + *outlen + 1); // +1 for NUL + + if (!dst) { + ReportOutOfMemory(cx); + return CharsT(); + } + + constexpr OnUTF8Error errorMode = + std::is_same_v + ? OnUTF8Error::InsertQuestionMark + : OnUTF8Error::InsertReplacementCharacter; + CopyAndInflateUTF8IntoBuffer(cx, src, dst, *outlen, allASCII); + dst[*outlen] = CharT('\0'); + + return CharsT(dst, *outlen); +} + +TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const ConstUTF8CharsZ& utf8, + size_t* outlen, + arena_id_t destArenaId) { + UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str())); + return InflateUTF8StringHelper( + cx, chars, outlen, destArenaId); +} + +TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const JS::UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper(cx, utf8, outlen, destArenaId); +} + +TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ( + JSContext* cx, const JS::ConstUTF8CharsZ& utf8, size_t* outlen, + arena_id_t destArenaId) { + UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str())); + return InflateUTF8StringHelper(cx, chars, outlen, destArenaId); +} + +static void UpdateSmallestEncodingForChar(char16_t c, + JS::SmallestEncoding* encoding) { + JS::SmallestEncoding newEncoding = JS::SmallestEncoding::ASCII; + if (c >= 0x80) { + if (c < 0x100) { + newEncoding = JS::SmallestEncoding::Latin1; + } else { + newEncoding = JS::SmallestEncoding::UTF16; + } + } + if (newEncoding > *encoding) { + *encoding = newEncoding; + } +} + +JS::SmallestEncoding JS::FindSmallestEncoding(UTF8Chars utf8) { + Span unsignedSpan = utf8; + auto charSpan = AsChars(unsignedSpan); + size_t upTo = AsciiValidUpTo(charSpan); + if (upTo == charSpan.Length()) { + return 
SmallestEncoding::ASCII; + } + if (IsUtf8Latin1(charSpan.From(upTo))) { + return SmallestEncoding::Latin1; + } + return SmallestEncoding::UTF16; +} + +Latin1CharsZ JS::UTF8CharsToNewLatin1CharsZ(JSContext* cx, const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +Latin1CharsZ JS::LossyUTF8CharsToNewLatin1CharsZ(JSContext* cx, + const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +/** + * Atomization Helpers. + * + * These functions are extremely single-use, and are not intended for general + * consumption. + */ + +bool GetUTF8AtomizationData(JSContext* cx, const JS::UTF8Chars utf8, + size_t* outlen, JS::SmallestEncoding* encoding, + HashNumber* hashNum) { + *outlen = 0; + *encoding = JS::SmallestEncoding::ASCII; + *hashNum = 0; + + auto getMetadata = [outlen, encoding, + hashNum](char16_t c) -> LoopDisposition { + (*outlen)++; + UpdateSmallestEncodingForChar(c, encoding); + *hashNum = mozilla::AddToHash(*hashNum, c); + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(cx, utf8, getMetadata)) { + return false; + } + + return true; +} + +template +bool UTF8EqualsChars(const JS::UTF8Chars utfChars, const CharT* chars) { + size_t ind = 0; + bool isEqual = true; + + auto checkEqual = [&isEqual, &ind, chars](char16_t c) -> LoopDisposition { +#ifdef DEBUG + JS::SmallestEncoding encoding = JS::SmallestEncoding::ASCII; + UpdateSmallestEncodingForChar(c, &encoding); + if (std::is_same_v) { + MOZ_ASSERT(encoding <= JS::SmallestEncoding::Latin1); + } else if (!std::is_same_v) { + MOZ_CRASH("Invalid character type in UTF8EqualsChars"); + } +#endif + + if (CharT(c) != chars[ind]) { + isEqual = false; + return LoopDisposition::Break; + } + + ind++; + return LoopDisposition::Continue; + }; + + // To get here, you must have checked your work. 
+ InflateUTF8ToUTF16(/* cx = */ nullptr, utfChars, + checkEqual); + + return isEqual; +} + +template bool UTF8EqualsChars(const JS::UTF8Chars, const char16_t*); +template bool UTF8EqualsChars(const JS::UTF8Chars, const JS::Latin1Char*); + +template +void InflateUTF8CharsToBuffer(const JS::UTF8Chars src, CharT* dst, + size_t dstLen, JS::SmallestEncoding encoding) { + CopyAndInflateUTF8IntoBuffer( + /* cx = */ nullptr, src, dst, dstLen, + encoding == JS::SmallestEncoding::ASCII); +} + +template void InflateUTF8CharsToBuffer(const UTF8Chars src, char16_t* dst, + size_t dstLen, + JS::SmallestEncoding encoding); +template void InflateUTF8CharsToBuffer(const UTF8Chars src, JS::Latin1Char* dst, + size_t dstLen, + JS::SmallestEncoding encoding); + +#ifdef DEBUG +void JS::ConstUTF8CharsZ::validate(size_t aLength) { + MOZ_ASSERT(data_); + UTF8Chars chars(data_, aLength); + auto nop = [](char16_t) -> LoopDisposition { + return LoopDisposition::Continue; + }; + InflateUTF8ToUTF16(/* cx = */ nullptr, chars, nop); +} +#endif + +bool JS::StringIsASCII(const char* s) { + while (*s) { + if (*s & 0x80) { + return false; + } + s++; + } + return true; +} + +bool JS::StringIsASCII(Span s) { return IsAscii(s); } + +JS_PUBLIC_API JS::UniqueChars JS::EncodeNarrowToUtf8(JSContext* cx, + const char* chars) { + // Convert the narrow multibyte character string to a wide string and then + // use EncodeWideToUtf8() to convert the wide string to a UTF-8 string. + + std::mbstate_t mb{}; + + // NOTE: The 2nd parameter is overwritten even if the 1st parameter is nullptr + // on Android NDK older than v16. Use a temporary variable to save the + // `chars` for the subsequent call. See bug 1492090. 
+ const char* tmpChars = chars; + + size_t wideLen = std::mbsrtowcs(nullptr, &tmpChars, 0, &mb); + if (wideLen == size_t(-1)) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_CANT_CONVERT_TO_WIDE); + return nullptr; + } + MOZ_ASSERT(std::mbsinit(&mb), + "multi-byte state is in its initial state when no conversion " + "error occured"); + + size_t bufLen = wideLen + 1; + auto wideChars = cx->make_pod_array(bufLen); + if (!wideChars) { + return nullptr; + } + + mozilla::DebugOnly actualLen = + std::mbsrtowcs(wideChars.get(), &chars, bufLen, &mb); + MOZ_ASSERT(wideLen == actualLen); + MOZ_ASSERT(wideChars[actualLen] == '\0'); + + return EncodeWideToUtf8(cx, wideChars.get()); +} + +JS_PUBLIC_API JS::UniqueChars JS::EncodeWideToUtf8(JSContext* cx, + const wchar_t* chars) { + using CheckedSizeT = mozilla::CheckedInt; + +#ifndef XP_LINUX + // Use the standard codecvt facet to convert a wide string to UTF-8. + std::codecvt_utf8 cv; + + size_t len = std::wcslen(chars); + CheckedSizeT utf8MaxLen = CheckedSizeT(len) * cv.max_length(); + CheckedSizeT utf8BufLen = utf8MaxLen + 1; + if (!utf8BufLen.isValid()) { + JS_ReportAllocationOverflow(cx); + return nullptr; + } + auto utf8 = cx->make_pod_array(utf8BufLen.value()); + if (!utf8) { + return nullptr; + } + + // STL returns |codecvt_base::partial| for empty strings. + if (len == 0) { + return utf8; + } + + std::mbstate_t mb{}; + const wchar_t* fromNext; + char* toNext; + std::codecvt_base::result result = + cv.out(mb, chars, chars + len, fromNext, utf8.get(), + utf8.get() + utf8MaxLen.value(), toNext); + if (result != std::codecvt_base::ok) { + MOZ_ASSERT(result == std::codecvt_base::error); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_CANT_CONVERT_WIDE_TO_UTF8); + return nullptr; + } + *toNext = '\0'; // Explicit null-termination required. 
+ + // codecvt_utf8 doesn't validate its output and may produce WTF-8 instead + // of UTF-8 on some platforms when the input contains unpaired surrogate + // characters. We don't allow this. + if (!mozilla::IsUtf8( + mozilla::Span(utf8.get(), size_t(toNext - utf8.get())))) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_CANT_CONVERT_WIDE_TO_UTF8); + return nullptr; + } + + return utf8; +#else + static_assert(sizeof(wchar_t) == 4, + "Assume wchar_t is UTF-32 on Linux systems"); + + constexpr size_t MaxUtf8CharLength = 4; + + size_t len = std::wcslen(chars); + CheckedSizeT utf8MaxLen = CheckedSizeT(len) * MaxUtf8CharLength; + CheckedSizeT utf8BufLen = utf8MaxLen + 1; + if (!utf8BufLen.isValid()) { + JS_ReportAllocationOverflow(cx); + return nullptr; + } + auto utf8 = cx->make_pod_array(utf8BufLen.value()); + if (!utf8) { + return nullptr; + } + + char* dst = utf8.get(); + for (size_t i = 0; i < len; i++) { + uint8_t utf8buf[MaxUtf8CharLength]; + uint32_t utf8Len = OneUcs4ToUtf8Char(utf8buf, chars[i]); + for (size_t j = 0; j < utf8Len; j++) { + *dst++ = char(utf8buf[j]); + } + } + *dst = '\0'; + + return utf8; +#endif +} + +JS_PUBLIC_API JS::UniqueChars JS::EncodeUtf8ToNarrow(JSContext* cx, + const char* chars) { + // Convert the UTF-8 string to a wide string via EncodeUtf8ToWide() and + // then convert the resulting wide string to a narrow multibyte character + // string. 
+ + auto wideChars = EncodeUtf8ToWide(cx, chars); + if (!wideChars) { + return nullptr; + } + + const wchar_t* cWideChars = wideChars.get(); + std::mbstate_t mb{}; + size_t narrowLen = std::wcsrtombs(nullptr, &cWideChars, 0, &mb); + if (narrowLen == size_t(-1)) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_CANT_CONVERT_TO_NARROW); + return nullptr; + } + MOZ_ASSERT(std::mbsinit(&mb), + "multi-byte state is in its initial state when no conversion " + "error occured"); + + size_t bufLen = narrowLen + 1; + auto narrow = cx->make_pod_array(bufLen); + if (!narrow) { + return nullptr; + } + + mozilla::DebugOnly actualLen = + std::wcsrtombs(narrow.get(), &cWideChars, bufLen, &mb); + MOZ_ASSERT(narrowLen == actualLen); + MOZ_ASSERT(narrow[actualLen] == '\0'); + + return narrow; +} + +JS_PUBLIC_API JS::UniqueWideChars JS::EncodeUtf8ToWide(JSContext* cx, + const char* chars) { + // Only valid UTF-8 strings should be passed to this function. + MOZ_ASSERT(mozilla::IsUtf8(mozilla::Span(chars, strlen(chars)))); + +#ifndef XP_LINUX + // Use the standard codecvt facet to convert from UTF-8 to a wide string. + std::codecvt_utf8 cv; + + size_t len = strlen(chars); + auto wideChars = cx->make_pod_array(len + 1); + if (!wideChars) { + return nullptr; + } + + // STL returns |codecvt_base::partial| for empty strings. + if (len == 0) { + return wideChars; + } + + std::mbstate_t mb{}; + const char* fromNext; + wchar_t* toNext; + std::codecvt_base::result result = + cv.in(mb, chars, chars + len, fromNext, wideChars.get(), + wideChars.get() + len, toNext); + if (result != std::codecvt_base::ok) { + MOZ_ASSERT(result == std::codecvt_base::error); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_CANT_CONVERT_UTF8_TO_WIDE); + return nullptr; + } + *toNext = '\0'; // Explicit null-termination required. 
+ + return wideChars; +#else + static_assert(sizeof(wchar_t) == 4, + "Assume wchar_t is UTF-32 on Linux systems"); + + size_t len = strlen(chars); + auto wideChars = cx->make_pod_array(len + 1); + if (!wideChars) { + return nullptr; + } + + const auto* s = reinterpret_cast(chars); + const auto* const limit = s + len; + + wchar_t* dst = wideChars.get(); + while (s < limit) { + unsigned char c = *s++; + + if (mozilla::IsAscii(c)) { + *dst++ = wchar_t(c); + continue; + } + + mozilla::Utf8Unit utf8(c); + mozilla::Maybe codePoint = + mozilla::DecodeOneUtf8CodePoint(utf8, &s, limit); + MOZ_ASSERT(codePoint.isSome()); + *dst++ = wchar_t(*codePoint); + } + *dst++ = '\0'; + + return wideChars; +#endif +} + +bool StringBuffer::append(const Utf8Unit* units, size_t len) { + MOZ_ASSERT(maybeCx_); + + if (isLatin1()) { + Latin1CharBuffer& latin1 = latin1Chars(); + + while (len > 0) { + if (!IsAscii(*units)) { + break; + } + + if (!latin1.append(units->toUnsignedChar())) { + return false; + } + + ++units; + --len; + } + if (len == 0) { + return true; + } + + // Non-ASCII doesn't *necessarily* mean we couldn't keep appending to + // |latin1|, but it's only possible for [U+0080, U+0100) code points, + // and handling the full complexity of UTF-8 only for that very small + // additional range isn't worth it. Inflate to two-byte storage before + // appending the remaining code points. + if (!inflateChars()) { + return false; + } + } + + UTF8Chars remainingUtf8(units, len); + + // Determine how many UTF-16 code units are required to represent the + // remaining units. 
+ size_t utf16Len = 0; + auto countInflated = [&utf16Len](char16_t c) -> LoopDisposition { + utf16Len++; + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(maybeCx_, remainingUtf8, + countInflated)) { + return false; + } + + TwoByteCharBuffer& buf = twoByteChars(); + + size_t i = buf.length(); + if (!buf.growByUninitialized(utf16Len)) { + return false; + } + MOZ_ASSERT(i + utf16Len == buf.length(), + "growByUninitialized assumed to increase length immediately"); + + char16_t* toFill = &buf[i]; + auto appendUtf16 = [&toFill](char16_t unit) { + *toFill++ = unit; + return LoopDisposition::Continue; + }; + + MOZ_ALWAYS_TRUE(InflateUTF8ToUTF16( + maybeCx_, remainingUtf8, appendUtf16)); + MOZ_ASSERT(toFill == buf.end()); + return true; +} diff --git a/js/src/vm/CheckIsObjectKind.h b/js/src/vm/CheckIsObjectKind.h new file mode 100644 index 0000000000..321870d6ed --- /dev/null +++ b/js/src/vm/CheckIsObjectKind.h @@ -0,0 +1,24 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_CheckIsObjectKind_h +#define vm_CheckIsObjectKind_h + +#include // uint8_t + +namespace js { + +enum class CheckIsObjectKind : uint8_t { + IteratorNext, + IteratorReturn, + IteratorThrow, + GetIterator, + GetAsyncIterator +}; + +} // namespace js + +#endif /* vm_CheckIsObjectKind_h */ diff --git a/js/src/vm/CodeCoverage.cpp b/js/src/vm/CodeCoverage.cpp new file mode 100644 index 0000000000..120fe1da6d --- /dev/null +++ b/js/src/vm/CodeCoverage.cpp @@ -0,0 +1,673 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/CodeCoverage.h" + +#include "mozilla/Atomics.h" +#include "mozilla/IntegerPrintfMacros.h" + +#include +#include + +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "gc/Zone.h" +#include "util/GetPidProvider.h" // getpid() +#include "util/Text.h" +#include "vm/BytecodeUtil.h" +#include "vm/JSScript.h" +#include "vm/Realm.h" +#include "vm/Runtime.h" +#include "vm/Time.h" + +// This file contains a few functions which are used to produce files understood +// by lcov tools. A detailed description of the format is available in the man +// page for "geninfo" [1]. To make it short, the following paraphrases what is +// commented in the man page by using curly braces prefixed by for-each to +// express repeated patterns. +// +// TN: +// for-each { +// SF: +// for-each