From 2aa4a82499d4becd2284cdb482213d541b8804dd Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 28 Apr 2024 16:29:10 +0200 Subject: Adding upstream version 86.0.1. Signed-off-by: Daniel Baumann --- js/src/vm/Activation-inl.h | 171 + js/src/vm/Activation.cpp | 88 + js/src/vm/Activation.h | 573 +++ js/src/vm/ArgumentsObject-inl.h | 66 + js/src/vm/ArgumentsObject.cpp | 1041 ++++++ js/src/vm/ArgumentsObject.h | 496 +++ js/src/vm/ArrayBufferObject-inl.h | 81 + js/src/vm/ArrayBufferObject.cpp | 1904 ++++++++++ js/src/vm/ArrayBufferObject.h | 659 ++++ js/src/vm/ArrayBufferObjectMaybeShared.cpp | 60 + js/src/vm/ArrayBufferViewObject.cpp | 298 ++ js/src/vm/ArrayBufferViewObject.h | 165 + js/src/vm/ArrayObject-inl.h | 87 + js/src/vm/ArrayObject.h | 64 + js/src/vm/AsyncFunction.cpp | 287 ++ js/src/vm/AsyncFunction.h | 325 ++ js/src/vm/AsyncFunctionResolveKind.h | 18 + js/src/vm/AsyncIteration.cpp | 700 ++++ js/src/vm/AsyncIteration.h | 591 +++ js/src/vm/AtomsTable.h | 222 ++ js/src/vm/BigIntType.cpp | 3840 +++++++++++++++++++ js/src/vm/BigIntType.h | 476 +++ js/src/vm/BindingKind.h | 102 + js/src/vm/BooleanObject-inl.h | 28 + js/src/vm/BooleanObject.h | 46 + js/src/vm/BuildId.cpp | 27 + js/src/vm/BuiltinObjectKind.cpp | 175 + js/src/vm/BuiltinObjectKind.h | 85 + js/src/vm/BytecodeFormatFlags.h | 57 + js/src/vm/BytecodeIterator-inl.h | 40 + js/src/vm/BytecodeIterator.h | 85 + js/src/vm/BytecodeLocation-inl.h | 111 + js/src/vm/BytecodeLocation.cpp | 28 + js/src/vm/BytecodeLocation.h | 347 ++ js/src/vm/BytecodeUtil-inl.h | 240 ++ js/src/vm/BytecodeUtil.cpp | 3036 +++++++++++++++ js/src/vm/BytecodeUtil.h | 723 ++++ js/src/vm/Caches-inl.h | 86 + js/src/vm/Caches.cpp | 23 + js/src/vm/Caches.h | 294 ++ js/src/vm/CallNonGenericMethod.cpp | 35 + js/src/vm/CharacterEncoding.cpp | 697 ++++ js/src/vm/CheckIsObjectKind.h | 24 + js/src/vm/CodeCoverage.cpp | 685 ++++ js/src/vm/CodeCoverage.h | 178 + js/src/vm/CommonPropertyNames.h | 538 +++ js/src/vm/Compartment-inl.h | 414 +++ 
js/src/vm/Compartment.cpp | 569 +++ js/src/vm/Compartment.h | 520 +++ js/src/vm/CompilationAndEvaluation.cpp | 581 +++ js/src/vm/Compression.cpp | 260 ++ js/src/vm/Compression.h | 115 + js/src/vm/DateObject.h | 97 + js/src/vm/DateTime.cpp | 784 ++++ js/src/vm/DateTime.h | 368 ++ js/src/vm/EnvironmentObject-inl.h | 89 + js/src/vm/EnvironmentObject.cpp | 4254 +++++++++++++++++++++ js/src/vm/EnvironmentObject.h | 1250 +++++++ js/src/vm/EqualityOperations.cpp | 252 ++ js/src/vm/EqualityOperations.h | 46 + js/src/vm/ErrorMessages.cpp | 29 + js/src/vm/ErrorObject-inl.h | 38 + js/src/vm/ErrorObject.cpp | 778 ++++ js/src/vm/ErrorObject.h | 129 + js/src/vm/ErrorReporting.cpp | 570 +++ js/src/vm/ErrorReporting.h | 176 + js/src/vm/Exception.cpp | 60 + js/src/vm/ForOfIterator.cpp | 211 ++ js/src/vm/FrameIter-inl.h | 57 + js/src/vm/FrameIter.cpp | 1063 ++++++ js/src/vm/FrameIter.h | 619 ++++ js/src/vm/FunctionFlags.cpp | 13 + js/src/vm/FunctionFlags.h | 334 ++ js/src/vm/FunctionPrefixKind.h | 18 + js/src/vm/GeckoProfiler-inl.h | 116 + js/src/vm/GeckoProfiler.cpp | 572 +++ js/src/vm/GeckoProfiler.h | 217 ++ js/src/vm/GeneratorAndAsyncKind.h | 17 + js/src/vm/GeneratorObject.cpp | 530 +++ js/src/vm/GeneratorObject.h | 254 ++ js/src/vm/GeneratorResumeKind.h | 18 + js/src/vm/GlobalObject-inl.h | 30 + js/src/vm/GlobalObject.cpp | 1136 ++++++ js/src/vm/GlobalObject.h | 998 +++++ js/src/vm/HelperThreadState.h | 677 ++++ js/src/vm/HelperThreadTask.h | 68 + js/src/vm/HelperThreads.cpp | 2683 ++++++++++++++ js/src/vm/HelperThreads.h | 274 ++ js/src/vm/Id.cpp | 52 + js/src/vm/Initialization.cpp | 301 ++ js/src/vm/InlineCharBuffer-inl.h | 159 + js/src/vm/Instrumentation.cpp | 289 ++ js/src/vm/Instrumentation.h | 109 + js/src/vm/Interpreter-inl.h | 682 ++++ js/src/vm/Interpreter.cpp | 5273 +++++++++++++++++++++++++++ js/src/vm/Interpreter.h | 682 ++++ js/src/vm/IsGivenTypeObject-inl.h | 33 + js/src/vm/Iteration.cpp | 1790 +++++++++ js/src/vm/Iteration.h | 508 +++ js/src/vm/JSAtom-inl.h | 183 
+ js/src/vm/JSAtom.cpp | 1450 ++++++++ js/src/vm/JSAtom.h | 116 + js/src/vm/JSAtomState.h | 56 + js/src/vm/JSContext-inl.h | 474 +++ js/src/vm/JSContext.cpp | 1246 +++++++ js/src/vm/JSContext.h | 1281 +++++++ js/src/vm/JSFunction-inl.h | 105 + js/src/vm/JSFunction.cpp | 2527 +++++++++++++ js/src/vm/JSFunction.h | 928 +++++ js/src/vm/JSONParser.cpp | 817 +++++ js/src/vm/JSONParser.h | 284 ++ js/src/vm/JSONPrinter.cpp | 259 ++ js/src/vm/JSONPrinter.h | 99 + js/src/vm/JSObject-inl.h | 673 ++++ js/src/vm/JSObject.cpp | 3951 ++++++++++++++++++++ js/src/vm/JSObject.h | 1074 ++++++ js/src/vm/JSScript-inl.h | 265 ++ js/src/vm/JSScript.cpp | 5128 ++++++++++++++++++++++++++ js/src/vm/JSScript.h | 2506 +++++++++++++ js/src/vm/JitActivation.cpp | 261 ++ js/src/vm/JitActivation.h | 268 ++ js/src/vm/List-inl.h | 130 + js/src/vm/List.cpp | 11 + js/src/vm/List.h | 91 + js/src/vm/MallocProvider.h | 255 ++ js/src/vm/MatchPairs.h | 123 + js/src/vm/MemoryMetrics.cpp | 875 +++++ js/src/vm/ModuleBuilder.h | 123 + js/src/vm/Modules.cpp | 197 + js/src/vm/Monitor.h | 75 + js/src/vm/MutexIDs.h | 79 + js/src/vm/NativeObject-inl.h | 869 +++++ js/src/vm/NativeObject.cpp | 2881 +++++++++++++++ js/src/vm/NativeObject.h | 1721 +++++++++ js/src/vm/NumberObject-inl.h | 28 + js/src/vm/NumberObject.h | 44 + js/src/vm/ObjectGroup.cpp | 395 ++ js/src/vm/ObjectGroup.h | 228 ++ js/src/vm/ObjectOperations-inl.h | 360 ++ js/src/vm/ObjectOperations.h | 296 ++ js/src/vm/OffThreadPromiseRuntimeState.cpp | 298 ++ js/src/vm/OffThreadPromiseRuntimeState.h | 207 ++ js/src/vm/OffThreadScriptCompilation.cpp | 221 ++ js/src/vm/Opcodes.h | 3639 ++++++++++++++++++ js/src/vm/PIC.cpp | 370 ++ js/src/vm/PIC.h | 243 ++ js/src/vm/PlainObject-inl.h | 104 + js/src/vm/PlainObject.cpp | 69 + js/src/vm/PlainObject.h | 51 + js/src/vm/Printer.cpp | 574 +++ js/src/vm/Printer.h | 235 ++ js/src/vm/Probes-inl.h | 97 + js/src/vm/Probes.cpp | 65 + js/src/vm/Probes.h | 142 + js/src/vm/ProfilingStack.cpp | 55 + 
js/src/vm/PromiseLookup.cpp | 272 ++ js/src/vm/PromiseLookup.h | 165 + js/src/vm/PromiseObject.h | 243 ++ js/src/vm/ProxyObject.cpp | 214 ++ js/src/vm/ProxyObject.h | 163 + js/src/vm/Realm-inl.h | 116 + js/src/vm/Realm.cpp | 841 +++++ js/src/vm/Realm.h | 907 +++++ js/src/vm/ReceiverGuard-inl.h | 29 + js/src/vm/ReceiverGuard.cpp | 18 + js/src/vm/ReceiverGuard.h | 82 + js/src/vm/RegExpObject.cpp | 1247 +++++++ js/src/vm/RegExpObject.h | 221 ++ js/src/vm/RegExpShared.h | 423 +++ js/src/vm/RegExpStatics.cpp | 113 + js/src/vm/RegExpStatics.h | 307 ++ js/src/vm/RegExpStaticsObject.h | 27 + js/src/vm/Runtime.cpp | 895 +++++ js/src/vm/Runtime.h | 1221 +++++++ js/src/vm/SavedFrame.h | 291 ++ js/src/vm/SavedStacks-inl.h | 29 + js/src/vm/SavedStacks.cpp | 2059 +++++++++++ js/src/vm/SavedStacks.h | 341 ++ js/src/vm/Scope.cpp | 2337 ++++++++++++ js/src/vm/Scope.h | 1822 +++++++++ js/src/vm/ScopeKind.h | 53 + js/src/vm/SelfHosting.cpp | 3397 +++++++++++++++++ js/src/vm/SelfHosting.h | 88 + js/src/vm/Shape-inl.h | 463 +++ js/src/vm/Shape.cpp | 2224 +++++++++++ js/src/vm/Shape.h | 1854 ++++++++++ js/src/vm/SharedArrayObject.cpp | 476 +++ js/src/vm/SharedArrayObject.h | 257 ++ js/src/vm/SharedImmutableStringsCache-inl.h | 76 + js/src/vm/SharedImmutableStringsCache.cpp | 114 + js/src/vm/SharedImmutableStringsCache.h | 458 +++ js/src/vm/SharedMem.h | 202 + js/src/vm/SharedStencil.h | 631 ++++ js/src/vm/SourceHook.cpp | 26 + js/src/vm/Stack-inl.h | 859 +++++ js/src/vm/Stack.cpp | 748 ++++ js/src/vm/Stack.h | 1010 +++++ js/src/vm/StencilEnums.h | 328 ++ js/src/vm/StringObject-inl.h | 51 + js/src/vm/StringObject.h | 69 + js/src/vm/StringType-inl.h | 453 +++ js/src/vm/StringType.cpp | 2266 ++++++++++++ js/src/vm/StringType.h | 2089 +++++++++++ js/src/vm/StructuredClone.cpp | 3642 ++++++++++++++++++ js/src/vm/SymbolType.cpp | 148 + js/src/vm/SymbolType.h | 154 + js/src/vm/TaggedProto.cpp | 33 + js/src/vm/TaggedProto.h | 167 + js/src/vm/ThrowMsgKind.cpp | 29 + js/src/vm/ThrowMsgKind.h | 33 
+ js/src/vm/Time.cpp | 397 ++ js/src/vm/Time.h | 180 + js/src/vm/ToSource.cpp | 232 ++ js/src/vm/ToSource.h | 26 + js/src/vm/TraceLogging.cpp | 1844 ++++++++++ js/src/vm/TraceLogging.h | 708 ++++ js/src/vm/TraceLoggingGraph.cpp | 721 ++++ js/src/vm/TraceLoggingGraph.h | 263 ++ js/src/vm/TraceLoggingTypes.cpp | 20 + js/src/vm/TraceLoggingTypes.h | 308 ++ js/src/vm/TypedArrayObject-inl.h | 758 ++++ js/src/vm/TypedArrayObject.cpp | 2767 ++++++++++++++ js/src/vm/TypedArrayObject.h | 323 ++ js/src/vm/UbiNode.cpp | 514 +++ js/src/vm/UbiNodeCensus.cpp | 1371 +++++++ js/src/vm/UbiNodeShortestPaths.cpp | 95 + js/src/vm/Uint8Clamped.h | 123 + js/src/vm/UsageStatistics.cpp | 20 + js/src/vm/Value.cpp | 41 + js/src/vm/Warnings.cpp | 105 + js/src/vm/Warnings.h | 27 + js/src/vm/WindowProxy.cpp | 69 + js/src/vm/WrapperObject.h | 40 + js/src/vm/Xdr.cpp | 848 +++++ js/src/vm/Xdr.h | 876 +++++ js/src/vm/jsopcode.py | 382 ++ js/src/vm/make_opcode_doc.py | 196 + 237 files changed, 140201 insertions(+) create mode 100644 js/src/vm/Activation-inl.h create mode 100644 js/src/vm/Activation.cpp create mode 100644 js/src/vm/Activation.h create mode 100644 js/src/vm/ArgumentsObject-inl.h create mode 100644 js/src/vm/ArgumentsObject.cpp create mode 100644 js/src/vm/ArgumentsObject.h create mode 100644 js/src/vm/ArrayBufferObject-inl.h create mode 100644 js/src/vm/ArrayBufferObject.cpp create mode 100644 js/src/vm/ArrayBufferObject.h create mode 100644 js/src/vm/ArrayBufferObjectMaybeShared.cpp create mode 100644 js/src/vm/ArrayBufferViewObject.cpp create mode 100644 js/src/vm/ArrayBufferViewObject.h create mode 100644 js/src/vm/ArrayObject-inl.h create mode 100644 js/src/vm/ArrayObject.h create mode 100644 js/src/vm/AsyncFunction.cpp create mode 100644 js/src/vm/AsyncFunction.h create mode 100644 js/src/vm/AsyncFunctionResolveKind.h create mode 100644 js/src/vm/AsyncIteration.cpp create mode 100644 js/src/vm/AsyncIteration.h create mode 100644 js/src/vm/AtomsTable.h create mode 100644 
js/src/vm/BigIntType.cpp create mode 100644 js/src/vm/BigIntType.h create mode 100644 js/src/vm/BindingKind.h create mode 100644 js/src/vm/BooleanObject-inl.h create mode 100644 js/src/vm/BooleanObject.h create mode 100644 js/src/vm/BuildId.cpp create mode 100644 js/src/vm/BuiltinObjectKind.cpp create mode 100644 js/src/vm/BuiltinObjectKind.h create mode 100644 js/src/vm/BytecodeFormatFlags.h create mode 100644 js/src/vm/BytecodeIterator-inl.h create mode 100644 js/src/vm/BytecodeIterator.h create mode 100644 js/src/vm/BytecodeLocation-inl.h create mode 100644 js/src/vm/BytecodeLocation.cpp create mode 100644 js/src/vm/BytecodeLocation.h create mode 100644 js/src/vm/BytecodeUtil-inl.h create mode 100644 js/src/vm/BytecodeUtil.cpp create mode 100644 js/src/vm/BytecodeUtil.h create mode 100644 js/src/vm/Caches-inl.h create mode 100644 js/src/vm/Caches.cpp create mode 100644 js/src/vm/Caches.h create mode 100644 js/src/vm/CallNonGenericMethod.cpp create mode 100644 js/src/vm/CharacterEncoding.cpp create mode 100644 js/src/vm/CheckIsObjectKind.h create mode 100644 js/src/vm/CodeCoverage.cpp create mode 100644 js/src/vm/CodeCoverage.h create mode 100644 js/src/vm/CommonPropertyNames.h create mode 100644 js/src/vm/Compartment-inl.h create mode 100644 js/src/vm/Compartment.cpp create mode 100644 js/src/vm/Compartment.h create mode 100644 js/src/vm/CompilationAndEvaluation.cpp create mode 100644 js/src/vm/Compression.cpp create mode 100644 js/src/vm/Compression.h create mode 100644 js/src/vm/DateObject.h create mode 100644 js/src/vm/DateTime.cpp create mode 100644 js/src/vm/DateTime.h create mode 100644 js/src/vm/EnvironmentObject-inl.h create mode 100644 js/src/vm/EnvironmentObject.cpp create mode 100644 js/src/vm/EnvironmentObject.h create mode 100644 js/src/vm/EqualityOperations.cpp create mode 100644 js/src/vm/EqualityOperations.h create mode 100644 js/src/vm/ErrorMessages.cpp create mode 100644 js/src/vm/ErrorObject-inl.h create mode 100644 js/src/vm/ErrorObject.cpp 
create mode 100644 js/src/vm/ErrorObject.h create mode 100644 js/src/vm/ErrorReporting.cpp create mode 100644 js/src/vm/ErrorReporting.h create mode 100644 js/src/vm/Exception.cpp create mode 100644 js/src/vm/ForOfIterator.cpp create mode 100644 js/src/vm/FrameIter-inl.h create mode 100644 js/src/vm/FrameIter.cpp create mode 100644 js/src/vm/FrameIter.h create mode 100644 js/src/vm/FunctionFlags.cpp create mode 100644 js/src/vm/FunctionFlags.h create mode 100644 js/src/vm/FunctionPrefixKind.h create mode 100644 js/src/vm/GeckoProfiler-inl.h create mode 100644 js/src/vm/GeckoProfiler.cpp create mode 100644 js/src/vm/GeckoProfiler.h create mode 100644 js/src/vm/GeneratorAndAsyncKind.h create mode 100644 js/src/vm/GeneratorObject.cpp create mode 100644 js/src/vm/GeneratorObject.h create mode 100644 js/src/vm/GeneratorResumeKind.h create mode 100644 js/src/vm/GlobalObject-inl.h create mode 100644 js/src/vm/GlobalObject.cpp create mode 100644 js/src/vm/GlobalObject.h create mode 100644 js/src/vm/HelperThreadState.h create mode 100644 js/src/vm/HelperThreadTask.h create mode 100644 js/src/vm/HelperThreads.cpp create mode 100644 js/src/vm/HelperThreads.h create mode 100644 js/src/vm/Id.cpp create mode 100644 js/src/vm/Initialization.cpp create mode 100644 js/src/vm/InlineCharBuffer-inl.h create mode 100644 js/src/vm/Instrumentation.cpp create mode 100644 js/src/vm/Instrumentation.h create mode 100644 js/src/vm/Interpreter-inl.h create mode 100644 js/src/vm/Interpreter.cpp create mode 100644 js/src/vm/Interpreter.h create mode 100644 js/src/vm/IsGivenTypeObject-inl.h create mode 100644 js/src/vm/Iteration.cpp create mode 100644 js/src/vm/Iteration.h create mode 100644 js/src/vm/JSAtom-inl.h create mode 100644 js/src/vm/JSAtom.cpp create mode 100644 js/src/vm/JSAtom.h create mode 100644 js/src/vm/JSAtomState.h create mode 100644 js/src/vm/JSContext-inl.h create mode 100644 js/src/vm/JSContext.cpp create mode 100644 js/src/vm/JSContext.h create mode 100644 
js/src/vm/JSFunction-inl.h create mode 100644 js/src/vm/JSFunction.cpp create mode 100644 js/src/vm/JSFunction.h create mode 100644 js/src/vm/JSONParser.cpp create mode 100644 js/src/vm/JSONParser.h create mode 100644 js/src/vm/JSONPrinter.cpp create mode 100644 js/src/vm/JSONPrinter.h create mode 100644 js/src/vm/JSObject-inl.h create mode 100644 js/src/vm/JSObject.cpp create mode 100644 js/src/vm/JSObject.h create mode 100644 js/src/vm/JSScript-inl.h create mode 100644 js/src/vm/JSScript.cpp create mode 100644 js/src/vm/JSScript.h create mode 100644 js/src/vm/JitActivation.cpp create mode 100644 js/src/vm/JitActivation.h create mode 100644 js/src/vm/List-inl.h create mode 100644 js/src/vm/List.cpp create mode 100644 js/src/vm/List.h create mode 100644 js/src/vm/MallocProvider.h create mode 100644 js/src/vm/MatchPairs.h create mode 100644 js/src/vm/MemoryMetrics.cpp create mode 100644 js/src/vm/ModuleBuilder.h create mode 100644 js/src/vm/Modules.cpp create mode 100644 js/src/vm/Monitor.h create mode 100644 js/src/vm/MutexIDs.h create mode 100644 js/src/vm/NativeObject-inl.h create mode 100644 js/src/vm/NativeObject.cpp create mode 100644 js/src/vm/NativeObject.h create mode 100644 js/src/vm/NumberObject-inl.h create mode 100644 js/src/vm/NumberObject.h create mode 100644 js/src/vm/ObjectGroup.cpp create mode 100644 js/src/vm/ObjectGroup.h create mode 100644 js/src/vm/ObjectOperations-inl.h create mode 100644 js/src/vm/ObjectOperations.h create mode 100644 js/src/vm/OffThreadPromiseRuntimeState.cpp create mode 100644 js/src/vm/OffThreadPromiseRuntimeState.h create mode 100644 js/src/vm/OffThreadScriptCompilation.cpp create mode 100644 js/src/vm/Opcodes.h create mode 100644 js/src/vm/PIC.cpp create mode 100644 js/src/vm/PIC.h create mode 100644 js/src/vm/PlainObject-inl.h create mode 100644 js/src/vm/PlainObject.cpp create mode 100644 js/src/vm/PlainObject.h create mode 100644 js/src/vm/Printer.cpp create mode 100644 js/src/vm/Printer.h create mode 100644 
js/src/vm/Probes-inl.h create mode 100644 js/src/vm/Probes.cpp create mode 100644 js/src/vm/Probes.h create mode 100644 js/src/vm/ProfilingStack.cpp create mode 100644 js/src/vm/PromiseLookup.cpp create mode 100644 js/src/vm/PromiseLookup.h create mode 100644 js/src/vm/PromiseObject.h create mode 100644 js/src/vm/ProxyObject.cpp create mode 100644 js/src/vm/ProxyObject.h create mode 100644 js/src/vm/Realm-inl.h create mode 100644 js/src/vm/Realm.cpp create mode 100644 js/src/vm/Realm.h create mode 100644 js/src/vm/ReceiverGuard-inl.h create mode 100644 js/src/vm/ReceiverGuard.cpp create mode 100644 js/src/vm/ReceiverGuard.h create mode 100644 js/src/vm/RegExpObject.cpp create mode 100644 js/src/vm/RegExpObject.h create mode 100644 js/src/vm/RegExpShared.h create mode 100644 js/src/vm/RegExpStatics.cpp create mode 100644 js/src/vm/RegExpStatics.h create mode 100644 js/src/vm/RegExpStaticsObject.h create mode 100644 js/src/vm/Runtime.cpp create mode 100644 js/src/vm/Runtime.h create mode 100644 js/src/vm/SavedFrame.h create mode 100644 js/src/vm/SavedStacks-inl.h create mode 100644 js/src/vm/SavedStacks.cpp create mode 100644 js/src/vm/SavedStacks.h create mode 100644 js/src/vm/Scope.cpp create mode 100644 js/src/vm/Scope.h create mode 100644 js/src/vm/ScopeKind.h create mode 100644 js/src/vm/SelfHosting.cpp create mode 100644 js/src/vm/SelfHosting.h create mode 100644 js/src/vm/Shape-inl.h create mode 100644 js/src/vm/Shape.cpp create mode 100644 js/src/vm/Shape.h create mode 100644 js/src/vm/SharedArrayObject.cpp create mode 100644 js/src/vm/SharedArrayObject.h create mode 100644 js/src/vm/SharedImmutableStringsCache-inl.h create mode 100644 js/src/vm/SharedImmutableStringsCache.cpp create mode 100644 js/src/vm/SharedImmutableStringsCache.h create mode 100644 js/src/vm/SharedMem.h create mode 100644 js/src/vm/SharedStencil.h create mode 100644 js/src/vm/SourceHook.cpp create mode 100644 js/src/vm/Stack-inl.h create mode 100644 js/src/vm/Stack.cpp create mode 100644 
js/src/vm/Stack.h create mode 100644 js/src/vm/StencilEnums.h create mode 100644 js/src/vm/StringObject-inl.h create mode 100644 js/src/vm/StringObject.h create mode 100644 js/src/vm/StringType-inl.h create mode 100644 js/src/vm/StringType.cpp create mode 100644 js/src/vm/StringType.h create mode 100644 js/src/vm/StructuredClone.cpp create mode 100644 js/src/vm/SymbolType.cpp create mode 100644 js/src/vm/SymbolType.h create mode 100644 js/src/vm/TaggedProto.cpp create mode 100644 js/src/vm/TaggedProto.h create mode 100644 js/src/vm/ThrowMsgKind.cpp create mode 100644 js/src/vm/ThrowMsgKind.h create mode 100644 js/src/vm/Time.cpp create mode 100644 js/src/vm/Time.h create mode 100644 js/src/vm/ToSource.cpp create mode 100644 js/src/vm/ToSource.h create mode 100644 js/src/vm/TraceLogging.cpp create mode 100644 js/src/vm/TraceLogging.h create mode 100644 js/src/vm/TraceLoggingGraph.cpp create mode 100644 js/src/vm/TraceLoggingGraph.h create mode 100644 js/src/vm/TraceLoggingTypes.cpp create mode 100644 js/src/vm/TraceLoggingTypes.h create mode 100644 js/src/vm/TypedArrayObject-inl.h create mode 100644 js/src/vm/TypedArrayObject.cpp create mode 100644 js/src/vm/TypedArrayObject.h create mode 100644 js/src/vm/UbiNode.cpp create mode 100644 js/src/vm/UbiNodeCensus.cpp create mode 100644 js/src/vm/UbiNodeShortestPaths.cpp create mode 100644 js/src/vm/Uint8Clamped.h create mode 100644 js/src/vm/UsageStatistics.cpp create mode 100644 js/src/vm/Value.cpp create mode 100644 js/src/vm/Warnings.cpp create mode 100644 js/src/vm/Warnings.h create mode 100644 js/src/vm/WindowProxy.cpp create mode 100644 js/src/vm/WrapperObject.h create mode 100644 js/src/vm/Xdr.cpp create mode 100644 js/src/vm/Xdr.h create mode 100644 js/src/vm/jsopcode.py create mode 100755 js/src/vm/make_opcode_doc.py (limited to 'js/src/vm') diff --git a/js/src/vm/Activation-inl.h b/js/src/vm/Activation-inl.h new file mode 100644 index 0000000000..76e054eb44 --- /dev/null +++ b/js/src/vm/Activation-inl.h @@ 
-0,0 +1,171 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_Activation_inl_h +#define vm_Activation_inl_h + +#include "vm/Activation.h" + +#include "mozilla/Assertions.h" // MOZ_ASSERT{,_IF}, MOZ_CRASH +#include "mozilla/Likely.h" // MOZ_UNLIKELY +#include "mozilla/Maybe.h" // mozilla::Maybe + +#include "jit/CalleeToken.h" // js::jit::CalleeToken +#include "vm/FrameIter.h" // js::FrameIter +#include "vm/JitActivation.h" // js::jit::JitActivation +#include "vm/JSContext.h" // JSContext +#include "vm/Stack.h" // js::AbstractFramePtr + +namespace js { + +inline ActivationEntryMonitor::ActivationEntryMonitor(JSContext* cx) + : cx_(cx), entryMonitor_(cx->entryMonitor) { + cx->entryMonitor = nullptr; +} + +inline ActivationEntryMonitor::ActivationEntryMonitor( + JSContext* cx, InterpreterFrame* entryFrame) + : ActivationEntryMonitor(cx) { + if (MOZ_UNLIKELY(entryMonitor_)) { + init(cx, entryFrame); + } +} + +inline ActivationEntryMonitor::ActivationEntryMonitor( + JSContext* cx, jit::CalleeToken entryToken) + : ActivationEntryMonitor(cx) { + if (MOZ_UNLIKELY(entryMonitor_)) { + init(cx, entryToken); + } +} + +inline ActivationEntryMonitor::~ActivationEntryMonitor() { + if (entryMonitor_) { + entryMonitor_->Exit(cx_); + } + + cx_->entryMonitor = entryMonitor_; +} + +inline Activation::Activation(JSContext* cx, Kind kind) + : cx_(cx), + compartment_(cx->compartment()), + prev_(cx->activation_), + prevProfiling_(prev_ ? 
prev_->mostRecentProfiling() : nullptr), + hideScriptedCallerCount_(0), + frameCache_(cx), + asyncStack_(cx, cx->asyncStackForNewActivations()), + asyncCause_(cx->asyncCauseForNewActivations), + asyncCallIsExplicit_(cx->asyncCallIsExplicit), + kind_(kind) { + cx->asyncStackForNewActivations() = nullptr; + cx->asyncCauseForNewActivations = nullptr; + cx->asyncCallIsExplicit = false; + cx->activation_ = this; +} + +inline Activation::~Activation() { + MOZ_ASSERT_IF(isProfiling(), this != cx_->profilingActivation_); + MOZ_ASSERT(cx_->activation_ == this); + MOZ_ASSERT(hideScriptedCallerCount_ == 0); + cx_->activation_ = prev_; + cx_->asyncCauseForNewActivations = asyncCause_; + cx_->asyncStackForNewActivations() = asyncStack_; + cx_->asyncCallIsExplicit = asyncCallIsExplicit_; +} + +inline bool Activation::isProfiling() const { + if (isInterpreter()) { + return asInterpreter()->isProfiling(); + } + + MOZ_ASSERT(isJit()); + return asJit()->isProfiling(); +} + +inline Activation* Activation::mostRecentProfiling() { + if (isProfiling()) { + return this; + } + return prevProfiling_; +} + +inline LiveSavedFrameCache* Activation::getLiveSavedFrameCache(JSContext* cx) { + if (!frameCache_.get().initialized() && !frameCache_.get().init(cx)) { + return nullptr; + } + return frameCache_.address(); +} + +/* static */ inline mozilla::Maybe +LiveSavedFrameCache::FramePtr::create(const FrameIter& iter) { + if (iter.done()) { + return mozilla::Nothing(); + } + + if (iter.isPhysicalJitFrame()) { + return mozilla::Some(FramePtr(iter.physicalJitFrame())); + } + + if (!iter.hasUsableAbstractFramePtr()) { + return mozilla::Nothing(); + } + + auto afp = iter.abstractFramePtr(); + + if (afp.isInterpreterFrame()) { + return mozilla::Some(FramePtr(afp.asInterpreterFrame())); + } + if (afp.isWasmDebugFrame()) { + return mozilla::Some(FramePtr(afp.asWasmDebugFrame())); + } + if (afp.isRematerializedFrame()) { + return mozilla::Some(FramePtr(afp.asRematerializedFrame())); + } + + 
MOZ_CRASH("unexpected frame type"); +} + +struct LiveSavedFrameCache::FramePtr::HasCachedMatcher { + template + bool operator()(Frame* f) const { + return f->hasCachedSavedFrame(); + } +}; + +inline bool LiveSavedFrameCache::FramePtr::hasCachedSavedFrame() const { + return ptr.match(HasCachedMatcher()); +} + +struct LiveSavedFrameCache::FramePtr::SetHasCachedMatcher { + template + void operator()(Frame* f) { + f->setHasCachedSavedFrame(); + } +}; + +inline void LiveSavedFrameCache::FramePtr::setHasCachedSavedFrame() { + ptr.match(SetHasCachedMatcher()); +} + +struct LiveSavedFrameCache::FramePtr::ClearHasCachedMatcher { + template + void operator()(Frame* f) { + f->clearHasCachedSavedFrame(); + } +}; + +inline void LiveSavedFrameCache::FramePtr::clearHasCachedSavedFrame() { + ptr.match(ClearHasCachedMatcher()); +} + +inline bool Activation::hasWasmExitFP() const { + return isJit() && asJit()->hasWasmExitFP(); +} + +} // namespace js + +#endif // vm_Activation_inl_h diff --git a/js/src/vm/Activation.cpp b/js/src/vm/Activation.cpp new file mode 100644 index 0000000000..6704444a56 --- /dev/null +++ b/js/src/vm/Activation.cpp @@ -0,0 +1,88 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "vm/Activation-inl.h" + +#include "mozilla/Assertions.h" // MOZ_ASSERT + +#include // size_t +#include // uint8_t, uint32_t + +#include "debugger/DebugAPI.h" // js::DebugAPI +#include "gc/GC.h" // js::gc::AutoSuppressGC +#include "jit/CalleeToken.h" // js::jit::CalleeToken{IsFunction,To{Function,Script}} +#include "js/RootingAPI.h" // JS::Rooted +#include "js/Value.h" // JS::Value +#include "vm/JSContext.h" // JSContext, js::TlsContext +#include "vm/Stack.h" // js::InterpreterFrame + +#include "vm/Compartment-inl.h" // JS::Compartment::wrap + +using namespace js; + +using JS::ObjectOrNullValue; +using JS::Rooted; +using JS::UndefinedValue; +using JS::Value; + +Value ActivationEntryMonitor::asyncStack(JSContext* cx) { + Rooted stack(cx, ObjectOrNullValue(cx->asyncStackForNewActivations())); + if (!cx->compartment()->wrap(cx, &stack)) { + cx->clearPendingException(); + return UndefinedValue(); + } + return stack; +} + +void ActivationEntryMonitor::init(JSContext* cx, InterpreterFrame* entryFrame) { + // The InterpreterFrame is not yet part of an Activation, so it won't + // be traced if we trigger GC here. Suppress GC to avoid this. + gc::AutoSuppressGC suppressGC(cx); + Rooted stack(cx, asyncStack(cx)); + const char* asyncCause = cx->asyncCauseForNewActivations; + if (entryFrame->isFunctionFrame()) { + entryMonitor_->Entry(cx, &entryFrame->callee(), stack, asyncCause); + } else { + entryMonitor_->Entry(cx, entryFrame->script(), stack, asyncCause); + } +} + +void ActivationEntryMonitor::init(JSContext* cx, jit::CalleeToken entryToken) { + // The CalleeToken is not traced at this point and we also don't want + // a GC to discard the code we're about to enter, so we suppress GC. 
+ gc::AutoSuppressGC suppressGC(cx); + RootedValue stack(cx, asyncStack(cx)); + const char* asyncCause = cx->asyncCauseForNewActivations; + if (jit::CalleeTokenIsFunction(entryToken)) { + entryMonitor_->Entry(cx_, jit::CalleeTokenToFunction(entryToken), stack, + asyncCause); + } else { + entryMonitor_->Entry(cx_, jit::CalleeTokenToScript(entryToken), stack, + asyncCause); + } +} + +void Activation::registerProfiling() { + MOZ_ASSERT(isProfiling()); + cx_->profilingActivation_ = this; +} + +void Activation::unregisterProfiling() { + MOZ_ASSERT(isProfiling()); + MOZ_ASSERT(cx_->profilingActivation_ == this); + cx_->profilingActivation_ = prevProfiling_; +} + +ActivationIterator::ActivationIterator(JSContext* cx) + : activation_(cx->activation_) { + MOZ_ASSERT(cx == TlsContext.get()); +} + +ActivationIterator& ActivationIterator::operator++() { + MOZ_ASSERT(activation_); + activation_ = activation_->prev(); + return *this; +} diff --git a/js/src/vm/Activation.h b/js/src/vm/Activation.h new file mode 100644 index 0000000000..a55c87799a --- /dev/null +++ b/js/src/vm/Activation.h @@ -0,0 +1,573 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_Activation_h +#define vm_Activation_h + +#include "mozilla/Assertions.h" // MOZ_ASSERT +#include "mozilla/Attributes.h" // MOZ_RAII + +#include // size_t +#include // uint8_t, uint32_t + +#include "jstypes.h" // JS_PUBLIC_API + +#include "jit/CalleeToken.h" // js::jit::CalleeToken +#include "js/CallArgs.h" // JS::CallArgs +#include "js/RootingAPI.h" // JS::Handle, JS::Rooted +#include "js/TypeDecls.h" // jsbytecode +#include "js/UniquePtr.h" // js::UniquePtr +#include "js/Value.h" // JS::Value +#include "vm/SavedFrame.h" // js::SavedFrame +#include "vm/Stack.h" // js::InterpreterRegs + +struct JS_PUBLIC_API JSContext; + +class JSFunction; +class JSObject; +class JSScript; + +namespace JS { + +class JS_PUBLIC_API Compartment; + +namespace dbg { +class JS_PUBLIC_API AutoEntryMonitor; +} // namespace dbg + +} // namespace JS + +namespace js { + +class InterpreterActivation; +class InterpreterFrame; + +namespace jit { +class JitActivation; +class JitFrameLayout; +} // namespace jit + +// This class is separate from Activation, because it calls Compartment::wrap() +// which can GC and walk the stack. It's not safe to do that within the +// JitActivation constructor. +class MOZ_RAII ActivationEntryMonitor { + JSContext* cx_; + + // The entry point monitor that was set on cx_->runtime() when this + // ActivationEntryMonitor was created. 
+ JS::dbg::AutoEntryMonitor* entryMonitor_; + + explicit inline ActivationEntryMonitor(JSContext* cx); + + ActivationEntryMonitor(const ActivationEntryMonitor& other) = delete; + void operator=(const ActivationEntryMonitor& other) = delete; + + void init(JSContext* cx, jit::CalleeToken entryToken); + void init(JSContext* cx, InterpreterFrame* entryFrame); + + JS::Value asyncStack(JSContext* cx); + + public: + inline ActivationEntryMonitor(JSContext* cx, InterpreterFrame* entryFrame); + inline ActivationEntryMonitor(JSContext* cx, jit::CalleeToken entryToken); + inline ~ActivationEntryMonitor(); +}; + +// [SMDOC] LiveSavedFrameCache: SavedFrame caching to minimize stack walking +// +// Since each SavedFrame object includes a 'parent' pointer to the SavedFrame +// for its caller, if we could easily find the right SavedFrame for a given +// stack frame, we wouldn't need to walk the rest of the stack. Traversing deep +// stacks can be expensive, and when we're profiling or instrumenting code, we +// may want to capture JavaScript stacks frequently, so such cases would benefit +// if we could avoid walking the entire stack. +// +// We could have a cache mapping frame addresses to their SavedFrame objects, +// but invalidating its entries would be a challenge. Popping a stack frame is +// extremely performance-sensitive, and SpiderMonkey stack frames can be OSR'd, +// thrown, rematerialized, and perhaps meet other fates; we would rather our +// cache not depend on handling so many tricky cases. +// +// It turns out that we can keep the cache accurate by reserving a single bit in +// the stack frame, which must be clear on any newly pushed frame. When we +// insert an entry into the cache mapping a given frame address to its +// SavedFrame, we set the bit in the frame. 
Then, we take care to probe the +// cache only for frames whose bit is set; the bit tells us that the frame has +// never left the stack, so its cache entry must be accurate, at least about +// which function the frame is executing (the line may have changed; more about +// that below). The code refers to this bit as the 'hasCachedSavedFrame' flag. +// +// We could manage such a cache replacing least-recently used entries, but we +// can do better than that: the cache can be a stack, of which we need examine +// only entries from the top. +// +// First, observe that stacks are walked from the youngest frame to the oldest, +// but SavedFrame chains are built from oldest to youngest, to ensure common +// tails are shared. This means that capturing a stack is necessarily a +// two-phase process: walk the stack, and then build the SavedFrames. +// +// Naturally, the first time we capture the stack, the cache is empty, and we +// must traverse the entire stack. As we build each SavedFrame, we push an entry +// associating the frame's address to its SavedFrame on the cache, and set the +// frame's bit. At the end, every frame has its bit set and an entry in the +// cache. +// +// Then the program runs some more. Some, none, or all of the frames are popped. +// Any new frames are pushed with their bit clear. Any frame with its bit set +// has never left the stack. The cache is left untouched. +// +// For the next capture, we walk the stack up to the first frame with its bit +// set, if there is one. Call it F; it must have a cache entry. We pop entries +// from the cache - all invalid, because they are above F's entry, and hence +// younger - until we find the entry matching F's address. Since F's bit is set, +// we know it never left the stack, and hence that no younger frame could have +// had a colliding address. And since the frame's bit was set when we pushed the +// cache entry, we know the entry is still valid. 
+// +// F's cache entry's SavedFrame covers the rest of the stack, so we don't need +// to walk the stack any further. Now we begin building SavedFrame objects for +// the new frames, pushing cache entries, and setting bits on the frames. By the +// end, the cache again covers the full stack, and every frame's bit is set. +// +// If we walk the stack to the end, and find no frame with its bit set, then the +// entire cache is invalid. At this point, it must be emptied, so that the new +// entries we are about to push are the only frames in the cache. +// +// For example, suppose we have the following stack (let 'A > B' mean "A called +// B", so the frames are listed oldest first): +// +// P > Q > R > S Initial stack, bits not set. +// P* > Q* > R* > S* Capture a SavedFrame stack, set bits. +// The cache now holds: P > Q > R > S. +// P* > Q* > R* Return from S. +// P* > Q* Return from R. +// P* > Q* > T > U Call T and U. New frames have clear bits. +// +// If we capture the stack now, the cache still holds: +// +// P > Q > R > S +// +// As we traverse the stack, we'll cross U and T, and then find Q with its bit +// set. We pop entries from the cache until we find the entry for Q; this +// removes entries R and S, which were indeed invalid. In Q's cache entry, we +// find the SavedFrame representing the stack P > Q. Now we build SavedFrames +// for the new portion of the stack, pushing an entry for T and setting the bit +// on the frame, and then doing the same for U. In the end, the call stack again +// has bits set on all its frames: +// +// P* > Q* > T* > U* All frames are now in the cache. +// +// And the cache again holds entries for the entire stack: +// +// P > Q > T > U +// +// Details: +// +// - When we find a cache entry whose frame address matches our frame F, we know +// that F has never left the stack, but it may certainly be the case that +// execution took place in that frame, and that the current source position +// within F's function has changed. 
This means that the entry's SavedFrame, +// which records the source line and column as well as the function, is not +// correct. To detect this case, when we push a cache entry, we record the +// frame's pc. When consulting the cache, if a frame's address matches but its +// pc does not, then we pop the cache entry, clear the frame's bit, and +// continue walking the stack. The next stack frame will definitely hit: since +// its callee frame never left the stack, the calling frame never got the +// chance to execute. +// +// - Generators, at least conceptually, have long-lived stack frames that +// disappear from the stack when the generator yields, and reappear on the +// stack when the generator's 'next' method is called. When a generator's +// frame is placed again atop the stack, its bit must be cleared - for the +// purposes of the cache, treating the frame as a new frame - to respect the +// invariants we used to justify the algorithm above. Async function +// activations usually appear atop empty stacks, since they are invoked as a +// promise callback, but the same rule applies. +// +// - SpiderMonkey has many types of stack frames, and not all have a place to +// store a bit indicating a cached SavedFrame. But as long as we don't create +// cache entries for frames we can't mark, simply omitting them from the cache +// is harmless. Uncacheable frame types include inlined Ion frames and +// non-Debug wasm frames. The LiveSavedFrameCache::FramePtr type represents +// only pointers to frames that can be cached, so if you have a FramePtr, you +// don't need to further check the frame for cachability. FramePtr provides +// access to the hasCachedSavedFrame bit. +// +// - We actually break up the cache into one cache per Activation. Popping an +// activation invalidates all its cache entries, simply by freeing the cache +// altogether. 
+// +// - The entire chain of SavedFrames for a given stack capture is created in the +// compartment of the code that requested the capture, *not* in that of the +// frames it represents, so in general, different compartments may have +// different SavedFrame objects representing the same actual stack frame. The +// LiveSavedFrameCache simply records whichever SavedFrames were used in the +// most recent captures. When we find a cache hit, we check the entry's +// SavedFrame's compartment against the current compartment; if they do not +// match, we clear the entire cache. +// +// This means that it is not always true that, if a frame's +// hasCachedSavedFrame bit is set, it must have an entry in the cache. The +// actual invariant is: either the cache is completely empty, or the frames' +// bits are trustworthy. This invariant holds even though capture can be +// interrupted at many places by OOM failures. Clearing the cache is a single, +// uninterruptible step. When we try to look up a frame whose bit is set and +// find an empty cache, we clear the frame's bit. And we only add the first +// frame to an empty cache once we've walked the stack all the way, so we know +// that all frames' bits are cleared by that point. +// +// - When the Debugger API evaluates an expression in some frame (the 'target +// frame'), it's SpiderMonkey's convention that the target frame be treated as +// the parent of the eval frame. In reality, of course, the eval frame is +// pushed on the top of the stack like any other frame, but stack captures +// simply jump straight over the intervening frames, so that the '.parent' +// property of a SavedFrame for the eval is the SavedFrame for the target. +// This is arranged by giving the eval frame an 'evalInFramePrev' link +// pointing to the target, which an ordinary FrameIter will notice and +// respect. +// +// If the LiveSavedFrameCache were presented with stack traversals that +// skipped frames in this way, it would cause havoc. 
First, with no debugger +// eval frames present, capture the stack, populating the cache. Then push a +// debugger eval frame and capture again; the skipped frames appear to be +// absent from the stack. Now pop the debugger eval frame, and capture a third +// time: the no-longer-skipped frames seem to reappear on the stack, with +// their cached bits still set. +// +// The LiveSavedFrameCache assumes that the stack it sees is used in a +// stack-like fashion: if a frame has its bit set, it has never left the +// stack. To support this assumption, when the cache is in use, we do not skip +// the frames between a debugger eval frame and its target; we always traverse +// the entire stack, invalidating and populating the cache in the usual way. +// Instead, when we construct a SavedFrame for a debugger eval frame, we +// select the appropriate parent at that point: rather than the next-older +// frame, we find the SavedFrame for the eval's target frame. The skip appears +// in the SavedFrame chains, even as the traversal covers all the frames. +// +// - Rematerialized frames (see ../jit/RematerializedFrame.h) are always created +// with their hasCachedSavedFrame bits clear: although there may be extant +// SavedFrames built from the original IonMonkey frame, the Rematerialized +// frames will not have cache entries for them until they are traversed in a +// capture themselves. +// +// This means that, oddly, it is not always true that, once we reach a frame +// with its hasCachedSavedFrame bit set, all its parents will have the bit set +// as well. However, clear bits under younger set bits will only occur on +// Rematerialized frames. +class LiveSavedFrameCache { + public: + // The address of a live frame for which we can cache SavedFrames: it has a + // 'hasCachedSavedFrame' bit we can examine and set, and can be converted to + // a Key to index the cache. + class FramePtr { + // We use jit::CommonFrameLayout for both Baseline frames and Ion + // physical frames. 
+ using Ptr = mozilla::Variant; + + Ptr ptr; + + template + explicit FramePtr(Frame ptr) : ptr(ptr) {} + + struct HasCachedMatcher; + struct SetHasCachedMatcher; + struct ClearHasCachedMatcher; + + public: + // If iter's frame is of a type that can be cached, construct a FramePtr + // for its frame. Otherwise, return Nothing. + static inline mozilla::Maybe create(const FrameIter& iter); + + inline bool hasCachedSavedFrame() const; + inline void setHasCachedSavedFrame(); + inline void clearHasCachedSavedFrame(); + + // Return true if this FramePtr refers to an interpreter frame. + inline bool isInterpreterFrame() const { + return ptr.is(); + } + + // If this FramePtr is an interpreter frame, return a pointer to it. + inline InterpreterFrame& asInterpreterFrame() const { + return *ptr.as(); + } + + // Return true if this FramePtr refers to a rematerialized frame. + inline bool isRematerializedFrame() const { + return ptr.is(); + } + + bool operator==(const FramePtr& rhs) const { return rhs.ptr == this->ptr; } + bool operator!=(const FramePtr& rhs) const { return !(rhs == *this); } + }; + + private: + // A key in the cache: the address of a frame, live or dead, for which we + // can cache SavedFrames. Since the pointer may not be live, the only + // operation this type permits is comparison. 
+ class Key { + FramePtr framePtr; + + public: + MOZ_IMPLICIT Key(const FramePtr& framePtr) : framePtr(framePtr) {} + + bool operator==(const Key& rhs) const { + return rhs.framePtr == this->framePtr; + } + bool operator!=(const Key& rhs) const { return !(rhs == *this); } + }; + + struct Entry { + const Key key; + const jsbytecode* pc; + HeapPtr savedFrame; + + Entry(const Key& key, const jsbytecode* pc, SavedFrame* savedFrame) + : key(key), pc(pc), savedFrame(savedFrame) {} + }; + + using EntryVector = Vector; + EntryVector* frames; + + LiveSavedFrameCache(const LiveSavedFrameCache&) = delete; + LiveSavedFrameCache& operator=(const LiveSavedFrameCache&) = delete; + + public: + explicit LiveSavedFrameCache() : frames(nullptr) {} + + LiveSavedFrameCache(LiveSavedFrameCache&& rhs) : frames(rhs.frames) { + MOZ_ASSERT(this != &rhs, "self-move disallowed"); + rhs.frames = nullptr; + } + + ~LiveSavedFrameCache() { + if (frames) { + js_delete(frames); + frames = nullptr; + } + } + + bool initialized() const { return !!frames; } + bool init(JSContext* cx) { + frames = js_new(); + if (!frames) { + JS_ReportOutOfMemory(cx); + return false; + } + return true; + } + + void trace(JSTracer* trc); + + // Set |frame| to the cached SavedFrame corresponding to |framePtr| at |pc|. + // |framePtr|'s hasCachedSavedFrame bit must be set. Remove all cache + // entries for frames younger than that one. + // + // This may set |frame| to nullptr if |pc| is different from the pc supplied + // when the cache entry was inserted. In this case, the cached SavedFrame + // (probably) has the wrong source position. Entries for younger frames are + // still removed. The next frame, if any, will be a cache hit. + // + // This may also set |frame| to nullptr if the cache was populated with + // SavedFrame objects for a different compartment than cx's current + // compartment. In this case, the entire cache is flushed. 
+ void find(JSContext* cx, FramePtr& framePtr, const jsbytecode* pc, + MutableHandleSavedFrame frame) const; + + // Search the cache for a frame matching |framePtr|, without removing any + // entries. Return the matching saved frame, or nullptr if none is found. + // This is used for resolving |evalInFramePrev| links. + void findWithoutInvalidation(const FramePtr& framePtr, + MutableHandleSavedFrame frame) const; + + // Push a cache entry mapping |framePtr| and |pc| to |savedFrame| on the top + // of the cache's stack. You must insert entries for frames from oldest to + // youngest. They must all be younger than the frame that the |find| method + // found a hit for; or you must have cleared the entire cache with the + // |clear| method. + bool insert(JSContext* cx, FramePtr&& framePtr, const jsbytecode* pc, + HandleSavedFrame savedFrame); + + // Remove all entries from the cache. + void clear() { + if (frames) frames->clear(); + } +}; + +static_assert( + sizeof(LiveSavedFrameCache) == sizeof(uintptr_t), + "Every js::Activation has a LiveSavedFrameCache, so we need to be pretty " + "careful " + "about avoiding bloat. If you're adding members to LiveSavedFrameCache, " + "maybe you " + "should consider figuring out a way to make js::Activation have a " + "LiveSavedFrameCache* instead of a Rooted."); + +class Activation { + protected: + JSContext* cx_; + JS::Compartment* compartment_; + Activation* prev_; + Activation* prevProfiling_; + + // Counter incremented by JS::HideScriptedCaller and decremented by + // JS::UnhideScriptedCaller. If > 0 for the top activation, + // DescribeScriptedCaller will return null instead of querying that + // activation, which should prompt the caller to consult embedding-specific + // data structures instead. + size_t hideScriptedCallerCount_; + + // The cache of SavedFrame objects we have already captured when walking + // this activation's stack. 
+ JS::Rooted frameCache_; + + // Youngest saved frame of an async stack that will be iterated during stack + // capture in place of the actual stack of previous activations. Note that + // the stack of this activation is captured entirely before this is used. + // + // Usually this is nullptr, meaning that normal stack capture will occur. + // When this is set, the stack of any previous activation is ignored. + JS::Rooted asyncStack_; + + // Value of asyncCause to be attached to asyncStack_. + const char* asyncCause_; + + // True if the async call was explicitly requested, e.g. via + // callFunctionWithAsyncStack. + bool asyncCallIsExplicit_; + + enum Kind { Interpreter, Jit }; + Kind kind_; + + inline Activation(JSContext* cx, Kind kind); + inline ~Activation(); + + public: + JSContext* cx() const { return cx_; } + JS::Compartment* compartment() const { return compartment_; } + Activation* prev() const { return prev_; } + Activation* prevProfiling() const { return prevProfiling_; } + inline Activation* mostRecentProfiling(); + + bool isInterpreter() const { return kind_ == Interpreter; } + bool isJit() const { return kind_ == Jit; } + inline bool hasWasmExitFP() const; + + inline bool isProfiling() const; + void registerProfiling(); + void unregisterProfiling(); + + InterpreterActivation* asInterpreter() const { + MOZ_ASSERT(isInterpreter()); + return (InterpreterActivation*)this; + } + jit::JitActivation* asJit() const { + MOZ_ASSERT(isJit()); + return (jit::JitActivation*)this; + } + + void hideScriptedCaller() { hideScriptedCallerCount_++; } + void unhideScriptedCaller() { + MOZ_ASSERT(hideScriptedCallerCount_ > 0); + hideScriptedCallerCount_--; + } + bool scriptedCallerIsHidden() const { return hideScriptedCallerCount_ > 0; } + + static size_t offsetOfPrev() { return offsetof(Activation, prev_); } + static size_t offsetOfPrevProfiling() { + return offsetof(Activation, prevProfiling_); + } + + SavedFrame* asyncStack() { return asyncStack_; } + + const char* 
asyncCause() const { return asyncCause_; } + + bool asyncCallIsExplicit() const { return asyncCallIsExplicit_; } + + inline LiveSavedFrameCache* getLiveSavedFrameCache(JSContext* cx); + void clearLiveSavedFrameCache() { frameCache_.get().clear(); } + + private: + Activation(const Activation& other) = delete; + void operator=(const Activation& other) = delete; +}; + +// This variable holds a special opcode value which is greater than all normal +// opcodes, and is chosen such that the bitwise or of this value with any +// opcode is this value. +constexpr jsbytecode EnableInterruptsPseudoOpcode = -1; + +static_assert(EnableInterruptsPseudoOpcode >= JSOP_LIMIT, + "EnableInterruptsPseudoOpcode must be greater than any opcode"); +static_assert( + EnableInterruptsPseudoOpcode == jsbytecode(-1), + "EnableInterruptsPseudoOpcode must be the maximum jsbytecode value"); + +class InterpreterFrameIterator; +class RunState; + +class InterpreterActivation : public Activation { + friend class js::InterpreterFrameIterator; + + InterpreterRegs regs_; + InterpreterFrame* entryFrame_; + size_t opMask_; // For debugger interrupts, see js::Interpret. + +#ifdef DEBUG + size_t oldFrameCount_; +#endif + + public: + inline InterpreterActivation(RunState& state, JSContext* cx, + InterpreterFrame* entryFrame); + inline ~InterpreterActivation(); + + inline bool pushInlineFrame(const JS::CallArgs& args, + JS::Handle script, + MaybeConstruct constructing); + inline void popInlineFrame(InterpreterFrame* frame); + + inline bool resumeGeneratorFrame(JS::Handle callee, + JS::Handle envChain); + + InterpreterFrame* current() const { return regs_.fp(); } + InterpreterRegs& regs() { return regs_; } + InterpreterFrame* entryFrame() const { return entryFrame_; } + size_t opMask() const { return opMask_; } + + bool isProfiling() const { return false; } + + // If this js::Interpret frame is running |script|, enable interrupts. 
+ void enableInterruptsIfRunning(JSScript* script) { + if (regs_.fp()->script() == script) { + enableInterruptsUnconditionally(); + } + } + void enableInterruptsUnconditionally() { + opMask_ = EnableInterruptsPseudoOpcode; + } + void clearInterruptsMask() { opMask_ = 0; } +}; + +// Iterates over a thread's activation list. +class ActivationIterator { + protected: + Activation* activation_; + + public: + explicit ActivationIterator(JSContext* cx); + + ActivationIterator& operator++(); + + Activation* operator->() const { return activation_; } + Activation* activation() const { return activation_; } + bool done() const { return activation_ == nullptr; } +}; + +} // namespace js + +#endif // vm_Activation_h diff --git a/js/src/vm/ArgumentsObject-inl.h b/js/src/vm/ArgumentsObject-inl.h new file mode 100644 index 0000000000..e8ac311b2e --- /dev/null +++ b/js/src/vm/ArgumentsObject-inl.h @@ -0,0 +1,66 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_ArgumentsObject_inl_h +#define vm_ArgumentsObject_inl_h + +#include "vm/ArgumentsObject.h" + +#include "vm/EnvironmentObject.h" + +#include "vm/EnvironmentObject-inl.h" +#include "vm/JSScript-inl.h" + +namespace js { + +inline const Value& ArgumentsObject::element(uint32_t i) const { + MOZ_ASSERT(!isElementDeleted(i)); + const Value& v = data()->args[i]; + if (IsMagicScopeSlotValue(v)) { + CallObject& callobj = + getFixedSlot(MAYBE_CALL_SLOT).toObject().as(); + return callobj.aliasedFormalFromArguments(v); + } + return v; +} + +inline void ArgumentsObject::setElement(uint32_t i, const Value& v) { + MOZ_ASSERT(!isElementDeleted(i)); + GCPtrValue& lhs = data()->args[i]; + if (IsMagicScopeSlotValue(lhs)) { + uint32_t slot = SlotFromMagicScopeSlotValue(lhs); + CallObject& callobj = + getFixedSlot(MAYBE_CALL_SLOT).toObject().as(); + for (Shape::Range r(callobj.lastProperty()); !r.empty(); + r.popFront()) { + if (r.front().slot() == slot) { + callobj.setAliasedFormalFromArguments(lhs, v); + return; + } + } + MOZ_CRASH("Bad Arguments::setElement"); + } + lhs = v; +} + +inline bool ArgumentsObject::maybeGetElements(uint32_t start, uint32_t count, + Value* vp) { + MOZ_ASSERT(start + count >= start); + + uint32_t length = initialLength(); + if (start > length || start + count > length || isAnyElementDeleted()) { + return false; + } + + for (uint32_t i = start, end = start + count; i < end; ++i, ++vp) { + *vp = element(i); + } + return true; +} + +} /* namespace js */ + +#endif /* vm_ArgumentsObject_inl_h */ diff --git a/js/src/vm/ArgumentsObject.cpp b/js/src/vm/ArgumentsObject.cpp new file mode 100644 index 0000000000..1ca7cd2d8d --- /dev/null +++ b/js/src/vm/ArgumentsObject.cpp @@ -0,0 +1,1041 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/ArgumentsObject-inl.h" + +#include "mozilla/PodOperations.h" + +#include + +#include "gc/FreeOp.h" +#include "jit/CalleeToken.h" +#include "jit/JitFrames.h" +#include "util/BitArray.h" +#include "vm/AsyncFunction.h" +#include "vm/GlobalObject.h" +#include "vm/Stack.h" + +#include "gc/Nursery-inl.h" +#include "vm/FrameIter-inl.h" // js::FrameIter::unaliasedForEachActual +#include "vm/JSObject-inl.h" +#include "vm/NativeObject-inl.h" +#include "vm/Stack-inl.h" + +using namespace js; + +/* static */ +size_t RareArgumentsData::bytesRequired(size_t numActuals) { + size_t extraBytes = NumWordsForBitArrayOfLength(numActuals) * sizeof(size_t); + return offsetof(RareArgumentsData, deletedBits_) + extraBytes; +} + +/* static */ +RareArgumentsData* RareArgumentsData::create(JSContext* cx, + ArgumentsObject* obj) { + size_t bytes = RareArgumentsData::bytesRequired(obj->initialLength()); + + uint8_t* data = AllocateObjectBuffer(cx, obj, bytes); + if (!data) { + return nullptr; + } + + mozilla::PodZero(data, bytes); + + AddCellMemory(obj, bytes, MemoryUse::RareArgumentsData); + + return new (data) RareArgumentsData(); +} + +bool ArgumentsObject::createRareData(JSContext* cx) { + MOZ_ASSERT(!data()->rareData); + + RareArgumentsData* rareData = RareArgumentsData::create(cx, this); + if (!rareData) { + return false; + } + + data()->rareData = rareData; + return true; +} + +bool ArgumentsObject::markElementDeleted(JSContext* cx, uint32_t i) { + RareArgumentsData* data = getOrCreateRareData(cx); + if (!data) { + return false; + } + + data->markElementDeleted(initialLength(), i); + return true; +} + +static void CopyStackFrameArguments(const AbstractFramePtr frame, + GCPtrValue* dst, unsigned totalArgs) { + MOZ_ASSERT_IF(frame.isInterpreterFrame(), + !frame.asInterpreterFrame()->runningInJit()); + + MOZ_ASSERT(std::max(frame.numActualArgs(), 
frame.numFormalArgs()) == + totalArgs); + + /* Copy arguments. */ + Value* src = frame.argv(); + Value* end = src + totalArgs; + while (src != end) { + (dst++)->init(*src++); + } +} + +/* static */ +void ArgumentsObject::MaybeForwardToCallObject(AbstractFramePtr frame, + ArgumentsObject* obj, + ArgumentsData* data) { + JSScript* script = frame.script(); + if (frame.callee()->needsCallObject() && script->argumentsAliasesFormals()) { + obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(frame.callObj())); + for (PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.closedOver()) { + data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot()); + } + } + } +} + +/* static */ +void ArgumentsObject::MaybeForwardToCallObject(jit::JitFrameLayout* frame, + HandleObject callObj, + ArgumentsObject* obj, + ArgumentsData* data) { + JSFunction* callee = jit::CalleeTokenToFunction(frame->calleeToken()); + JSScript* script = callee->nonLazyScript(); + if (callee->needsCallObject() && script->argumentsAliasesFormals()) { + MOZ_ASSERT(callObj && callObj->is()); + obj->initFixedSlot(MAYBE_CALL_SLOT, ObjectValue(*callObj.get())); + for (PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.closedOver()) { + data->args[fi.argumentSlot()] = MagicEnvSlotValue(fi.location().slot()); + } + } + } +} + +struct CopyFrameArgs { + AbstractFramePtr frame_; + + explicit CopyFrameArgs(AbstractFramePtr frame) : frame_(frame) {} + + void copyArgs(JSContext*, GCPtrValue* dst, unsigned totalArgs) const { + CopyStackFrameArguments(frame_, dst, totalArgs); + } + + /* + * If a call object exists and the arguments object aliases formals, the + * call object is the canonical location for formals. 
+ */ + void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) { + ArgumentsObject::MaybeForwardToCallObject(frame_, obj, data); + } +}; + +struct CopyJitFrameArgs { + jit::JitFrameLayout* frame_; + HandleObject callObj_; + + CopyJitFrameArgs(jit::JitFrameLayout* frame, HandleObject callObj) + : frame_(frame), callObj_(callObj) {} + + void copyArgs(JSContext*, GCPtrValue* dstBase, unsigned totalArgs) const { + unsigned numActuals = frame_->numActualArgs(); + unsigned numFormals = + jit::CalleeTokenToFunction(frame_->calleeToken())->nargs(); + MOZ_ASSERT(numActuals <= totalArgs); + MOZ_ASSERT(numFormals <= totalArgs); + MOZ_ASSERT(std::max(numActuals, numFormals) == totalArgs); + + /* Copy all arguments. */ + Value* src = frame_->argv() + 1; /* +1 to skip this. */ + Value* end = src + numActuals; + GCPtrValue* dst = dstBase; + while (src != end) { + (dst++)->init(*src++); + } + + if (numActuals < numFormals) { + GCPtrValue* dstEnd = dstBase + totalArgs; + while (dst != dstEnd) { + (dst++)->init(UndefinedValue()); + } + } + } + + /* + * If a call object exists and the arguments object aliases formals, the + * call object is the canonical location for formals. + */ + void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) { + ArgumentsObject::MaybeForwardToCallObject(frame_, callObj_, obj, data); + } +}; + +struct CopyScriptFrameIterArgs { + ScriptFrameIter& iter_; + + explicit CopyScriptFrameIterArgs(ScriptFrameIter& iter) : iter_(iter) {} + + void copyArgs(JSContext* cx, GCPtrValue* dstBase, unsigned totalArgs) const { + /* Copy actual arguments. */ + iter_.unaliasedForEachActual(cx, CopyToHeap(dstBase)); + + /* Define formals which are not part of the actuals. 
*/ + unsigned numActuals = iter_.numActualArgs(); + unsigned numFormals = iter_.calleeTemplate()->nargs(); + MOZ_ASSERT(numActuals <= totalArgs); + MOZ_ASSERT(numFormals <= totalArgs); + MOZ_ASSERT(std::max(numActuals, numFormals) == totalArgs); + + if (numActuals < numFormals) { + GCPtrValue* dst = dstBase + numActuals; + GCPtrValue* dstEnd = dstBase + totalArgs; + while (dst != dstEnd) { + (dst++)->init(UndefinedValue()); + } + } + } + + /* + * Ion frames are copying every argument onto the stack, other locations are + * invalid. + */ + void maybeForwardToCallObject(ArgumentsObject* obj, ArgumentsData* data) { + if (!iter_.isIon()) { + ArgumentsObject::MaybeForwardToCallObject(iter_.abstractFramePtr(), obj, + data); + } + } +}; + +ArgumentsObject* ArgumentsObject::createTemplateObject(JSContext* cx, + bool mapped) { + const JSClass* clasp = mapped ? &MappedArgumentsObject::class_ + : &UnmappedArgumentsObject::class_; + + RootedObject proto( + cx, GlobalObject::getOrCreateObjectPrototype(cx, cx->global())); + if (!proto) { + return nullptr; + } + + RootedObjectGroup group( + cx, ObjectGroup::defaultNewGroup(cx, clasp, TaggedProto(proto.get()))); + if (!group) { + return nullptr; + } + + RootedShape shape( + cx, EmptyShape::getInitialShape(cx, clasp, TaggedProto(proto), + FINALIZE_KIND, BaseShape::INDEXED)); + if (!shape) { + return nullptr; + } + + AutoSetNewObjectMetadata metadata(cx); + JSObject* base; + JS_TRY_VAR_OR_RETURN_NULL( + cx, base, + NativeObject::create(cx, FINALIZE_KIND, gc::TenuredHeap, shape, group)); + + ArgumentsObject* obj = &base->as(); + obj->initFixedSlot(ArgumentsObject::DATA_SLOT, PrivateValue(nullptr)); + return obj; +} + +ArgumentsObject* Realm::maybeArgumentsTemplateObject(bool mapped) const { + return mapped ? mappedArgumentsTemplate_ : unmappedArgumentsTemplate_; +} + +ArgumentsObject* Realm::getOrCreateArgumentsTemplateObject(JSContext* cx, + bool mapped) { + WeakHeapPtr& obj = + mapped ? 
mappedArgumentsTemplate_ : unmappedArgumentsTemplate_; + + ArgumentsObject* templateObj = obj; + if (templateObj) { + return templateObj; + } + + templateObj = ArgumentsObject::createTemplateObject(cx, mapped); + if (!templateObj) { + return nullptr; + } + + obj.set(templateObj); + return templateObj; +} + +template +/* static */ +ArgumentsObject* ArgumentsObject::create(JSContext* cx, HandleFunction callee, + unsigned numActuals, CopyArgs& copy) { + bool mapped = callee->baseScript()->hasMappedArgsObj(); + ArgumentsObject* templateObj = + cx->realm()->getOrCreateArgumentsTemplateObject(cx, mapped); + if (!templateObj) { + return nullptr; + } + + RootedShape shape(cx, templateObj->lastProperty()); + RootedObjectGroup group(cx, templateObj->group()); + + unsigned numFormals = callee->nargs(); + unsigned numArgs = std::max(numActuals, numFormals); + unsigned numBytes = ArgumentsData::bytesRequired(numArgs); + + Rooted obj(cx); + ArgumentsData* data = nullptr; + { + // The copyArgs call below can allocate objects, so add this block scope + // to make sure we set the metadata for this arguments object first. + AutoSetNewObjectMetadata metadata(cx); + + JSObject* base; + JS_TRY_VAR_OR_RETURN_NULL( + cx, base, + NativeObject::create(cx, FINALIZE_KIND, gc::DefaultHeap, shape, group)); + obj = &base->as(); + + data = reinterpret_cast( + AllocateObjectBuffer(cx, obj, numBytes)); + if (!data) { + // Make the object safe for GC. + obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr)); + return nullptr; + } + + data->numArgs = numArgs; + data->rareData = nullptr; + + // Initialize |args| with a pattern that is safe for GC tracing. + for (unsigned i = 0; i < numArgs; i++) { + data->args[i].init(UndefinedValue()); + } + + InitReservedSlot(obj, DATA_SLOT, data, numBytes, MemoryUse::ArgumentsData); + obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee)); + } + MOZ_ASSERT(data != nullptr); + + /* Copy [0, numArgs) into data->slots. 
*/ + copy.copyArgs(cx, data->args, numArgs); + + obj->initFixedSlot(INITIAL_LENGTH_SLOT, + Int32Value(numActuals << PACKED_BITS_COUNT)); + + copy.maybeForwardToCallObject(obj, data); + + MOZ_ASSERT(obj->initialLength() == numActuals); + MOZ_ASSERT(!obj->hasOverriddenLength()); + return obj; +} + +ArgumentsObject* ArgumentsObject::createExpected(JSContext* cx, + AbstractFramePtr frame) { + MOZ_ASSERT(frame.script()->needsArgsObj()); + RootedFunction callee(cx, frame.callee()); + CopyFrameArgs copy(frame); + ArgumentsObject* argsobj = create(cx, callee, frame.numActualArgs(), copy); + if (!argsobj) { + return nullptr; + } + + frame.initArgsObj(*argsobj); + return argsobj; +} + +ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx, + ScriptFrameIter& iter) { + RootedFunction callee(cx, iter.callee(cx)); + CopyScriptFrameIterArgs copy(iter); + return create(cx, callee, iter.numActualArgs(), copy); +} + +ArgumentsObject* ArgumentsObject::createUnexpected(JSContext* cx, + AbstractFramePtr frame) { + RootedFunction callee(cx, frame.callee()); + CopyFrameArgs copy(frame); + return create(cx, callee, frame.numActualArgs(), copy); +} + +ArgumentsObject* ArgumentsObject::createForIon(JSContext* cx, + jit::JitFrameLayout* frame, + HandleObject scopeChain) { + jit::CalleeToken token = frame->calleeToken(); + MOZ_ASSERT(jit::CalleeTokenIsFunction(token)); + RootedFunction callee(cx, jit::CalleeTokenToFunction(token)); + RootedObject callObj( + cx, scopeChain->is() ? scopeChain.get() : nullptr); + CopyJitFrameArgs copy(frame, callObj); + return create(cx, callee, frame->numActualArgs(), copy); +} + +/* static */ +ArgumentsObject* ArgumentsObject::finishForIonPure(JSContext* cx, + jit::JitFrameLayout* frame, + JSObject* scopeChain, + ArgumentsObject* obj) { + // JIT code calls this directly (no callVM), because it's faster, so we're + // not allowed to GC in here. 
+ AutoUnsafeCallWithABI unsafe; + + JSFunction* callee = jit::CalleeTokenToFunction(frame->calleeToken()); + RootedObject callObj(cx, scopeChain->is() ? scopeChain : nullptr); + CopyJitFrameArgs copy(frame, callObj); + + unsigned numActuals = frame->numActualArgs(); + unsigned numFormals = callee->nargs(); + unsigned numArgs = std::max(numActuals, numFormals); + unsigned numBytes = ArgumentsData::bytesRequired(numArgs); + + ArgumentsData* data = reinterpret_cast( + AllocateObjectBuffer(cx, obj, numBytes)); + if (!data) { + // Make the object safe for GC. Don't report OOM, the slow path will + // retry the allocation. + cx->recoverFromOutOfMemory(); + obj->initFixedSlot(DATA_SLOT, PrivateValue(nullptr)); + return nullptr; + } + + data->numArgs = numArgs; + data->rareData = nullptr; + + obj->initFixedSlot(INITIAL_LENGTH_SLOT, + Int32Value(numActuals << PACKED_BITS_COUNT)); + obj->initFixedSlot(DATA_SLOT, PrivateValue(data)); + AddCellMemory(obj, numBytes, MemoryUse::ArgumentsData); + obj->initFixedSlot(MAYBE_CALL_SLOT, UndefinedValue()); + obj->initFixedSlot(CALLEE_SLOT, ObjectValue(*callee)); + + copy.copyArgs(cx, data->args, numArgs); + + if (callObj && callee->needsCallObject()) { + copy.maybeForwardToCallObject(obj, data); + } + + MOZ_ASSERT(obj->initialLength() == numActuals); + MOZ_ASSERT(!obj->hasOverriddenLength()); + return obj; +} + +/* static */ +bool ArgumentsObject::obj_delProperty(JSContext* cx, HandleObject obj, + HandleId id, ObjectOpResult& result) { + ArgumentsObject& argsobj = obj->as(); + if (JSID_IS_INT(id)) { + unsigned arg = unsigned(JSID_TO_INT(id)); + if (arg < argsobj.initialLength() && !argsobj.isElementDeleted(arg)) { + if (!argsobj.markElementDeleted(cx, arg)) { + return false; + } + } + } else if (JSID_IS_ATOM(id, cx->names().length)) { + argsobj.markLengthOverridden(); + } else if (JSID_IS_ATOM(id, cx->names().callee)) { + argsobj.as().markCalleeOverridden(); + } else if (JSID_IS_SYMBOL(id) && + JSID_TO_SYMBOL(id) == 
cx->wellKnownSymbols().iterator) { + argsobj.markIteratorOverridden(); + } + return result.succeed(); +} + +/* static */ +bool ArgumentsObject::obj_mayResolve(const JSAtomState& names, jsid id, + JSObject*) { + // Arguments might resolve indexes, Symbol.iterator, or length/callee. + if (JSID_IS_ATOM(id)) { + JSAtom* atom = JSID_TO_ATOM(id); + uint32_t index; + if (atom->isIndex(&index)) { + return true; + } + return atom == names.length || atom == names.callee; + } + + return id.isInt() || id.isWellKnownSymbol(JS::SymbolCode::iterator); +} + +static bool MappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp) { + MappedArgumentsObject& argsobj = obj->as(); + if (JSID_IS_INT(id)) { + /* + * arg can exceed the number of arguments if a script changed the + * prototype to point to another Arguments object with a bigger argc. + */ + unsigned arg = unsigned(JSID_TO_INT(id)); + if (arg < argsobj.initialLength() && !argsobj.isElementDeleted(arg)) { + vp.set(argsobj.element(arg)); + } + } else if (JSID_IS_ATOM(id, cx->names().length)) { + if (!argsobj.hasOverriddenLength()) { + vp.setInt32(argsobj.initialLength()); + } + } else { + MOZ_ASSERT(JSID_IS_ATOM(id, cx->names().callee)); + if (!argsobj.hasOverriddenCallee()) { + vp.setObject(argsobj.callee()); + } + } + return true; +} + +static bool MappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result) { + if (!obj->is()) { + return result.succeed(); + } + Handle argsobj = obj.as(); + + Rooted desc(cx); + if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) { + return false; + } + MOZ_ASSERT(desc.object()); + unsigned attrs = desc.attributes(); + MOZ_ASSERT(!(attrs & JSPROP_READONLY)); + attrs &= (JSPROP_ENUMERATE | JSPROP_PERMANENT); /* only valid attributes */ + + if (JSID_IS_INT(id)) { + unsigned arg = unsigned(JSID_TO_INT(id)); + if (arg < argsobj->initialLength() && !argsobj->isElementDeleted(arg)) { + argsobj->setElement(arg, v); + return 
result.succeed(); + } + } else { + MOZ_ASSERT(JSID_IS_ATOM(id, cx->names().length) || + JSID_IS_ATOM(id, cx->names().callee)); + } + + /* + * For simplicity we use delete/define to replace the property with a + * simple data property. Note that we rely on ArgumentsObject::obj_delProperty + * to set the corresponding override-bit. + * Note also that we must define the property instead of setting it in case + * the user has changed the prototype to an object that has a setter for + * this id. + */ + ObjectOpResult ignored; + return NativeDeleteProperty(cx, argsobj, id, ignored) && + NativeDefineDataProperty(cx, argsobj, id, v, attrs, result); +} + +/* static */ +bool ArgumentsObject::getArgumentsIterator(JSContext* cx, + MutableHandleValue val) { + HandlePropertyName shName = cx->names().ArrayValues; + RootedAtom name(cx, cx->names().values); + return GlobalObject::getSelfHostedFunction(cx, cx->global(), shName, name, 0, + val); +} + +/* static */ +bool ArgumentsObject::reifyLength(JSContext* cx, Handle obj) { + if (obj->hasOverriddenLength()) { + return true; + } + + RootedId id(cx, NameToId(cx->names().length)); + RootedValue val(cx, Int32Value(obj->initialLength())); + if (!NativeDefineDataProperty(cx, obj, id, val, JSPROP_RESOLVING)) { + return false; + } + + obj->markLengthOverridden(); + return true; +} + +/* static */ +bool ArgumentsObject::reifyIterator(JSContext* cx, + Handle obj) { + if (obj->hasOverriddenIterator()) { + return true; + } + + RootedId iteratorId(cx, SYMBOL_TO_JSID(cx->wellKnownSymbols().iterator)); + RootedValue val(cx); + if (!ArgumentsObject::getArgumentsIterator(cx, &val)) { + return false; + } + if (!NativeDefineDataProperty(cx, obj, iteratorId, val, JSPROP_RESOLVING)) { + return false; + } + + obj->markIteratorOverridden(); + return true; +} + +/* static */ +bool MappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj, + HandleId id, bool* resolvedp) { + Rooted argsobj(cx, &obj->as()); + + if (JSID_IS_SYMBOL(id) && + 
JSID_TO_SYMBOL(id) == cx->wellKnownSymbols().iterator) { + if (argsobj->hasOverriddenIterator()) { + return true; + } + + if (!reifyIterator(cx, argsobj)) { + return false; + } + *resolvedp = true; + return true; + } + + unsigned attrs = JSPROP_RESOLVING; + if (JSID_IS_INT(id)) { + uint32_t arg = uint32_t(JSID_TO_INT(id)); + if (arg >= argsobj->initialLength() || argsobj->isElementDeleted(arg)) { + return true; + } + + attrs |= JSPROP_ENUMERATE; + } else if (JSID_IS_ATOM(id, cx->names().length)) { + if (argsobj->hasOverriddenLength()) { + return true; + } + } else { + if (!JSID_IS_ATOM(id, cx->names().callee)) { + return true; + } + + if (argsobj->hasOverriddenCallee()) { + return true; + } + } + + if (!NativeDefineAccessorProperty(cx, argsobj, id, MappedArgGetter, + MappedArgSetter, attrs)) { + return false; + } + + *resolvedp = true; + return true; +} + +/* static */ +bool MappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) { + Rooted argsobj(cx, &obj->as()); + + RootedId id(cx); + bool found; + + // Trigger reflection. + id = NameToId(cx->names().length); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = NameToId(cx->names().callee); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = SYMBOL_TO_JSID(cx->wellKnownSymbols().iterator); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + for (unsigned i = 0; i < argsobj->initialLength(); i++) { + id = INT_TO_JSID(i); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + } + + return true; +} + +// ES 2017 draft 9.4.4.2 +/* static */ +bool MappedArgumentsObject::obj_defineProperty(JSContext* cx, HandleObject obj, + HandleId id, + Handle desc, + ObjectOpResult& result) { + // Step 1. + Rooted argsobj(cx, &obj->as()); + + // Steps 2-3. 
+ bool isMapped = false; + if (JSID_IS_INT(id)) { + unsigned arg = unsigned(JSID_TO_INT(id)); + isMapped = + arg < argsobj->initialLength() && !argsobj->isElementDeleted(arg); + } + + // Step 4. + Rooted newArgDesc(cx, desc); + + // Step 5. + if (!desc.isAccessorDescriptor() && isMapped) { + // Step 5.a. + if (desc.hasWritable() && !desc.writable()) { + if (!desc.hasValue()) { + RootedValue v(cx, argsobj->element(JSID_TO_INT(id))); + newArgDesc.setValue(v); + } + newArgDesc.setGetter(nullptr); + newArgDesc.setSetter(nullptr); + } else { + // In this case the live mapping is supposed to keep working, + // we have to pass along the Getter/Setter otherwise they are + // overwritten. + newArgDesc.setGetter(MappedArgGetter); + newArgDesc.setSetter(MappedArgSetter); + newArgDesc.value().setUndefined(); + newArgDesc.attributesRef() |= JSPROP_IGNORE_VALUE; + } + } + + // Step 6. NativeDefineProperty will lookup [[Value]] for us. + if (!NativeDefineProperty(cx, obj.as(), id, newArgDesc, + result)) { + return false; + } + // Step 7. + if (!result.ok()) { + return true; + } + + // Step 8. + if (isMapped) { + unsigned arg = unsigned(JSID_TO_INT(id)); + if (desc.isAccessorDescriptor()) { + if (!argsobj->markElementDeleted(cx, arg)) { + return false; + } + } else { + if (desc.hasValue()) { + argsobj->setElement(arg, desc.value()); + } + if (desc.hasWritable() && !desc.writable()) { + if (!argsobj->markElementDeleted(cx, arg)) { + return false; + } + } + } + } + + // Step 9. + return result.succeed(); +} + +static bool UnmappedArgGetter(JSContext* cx, HandleObject obj, HandleId id, + MutableHandleValue vp) { + UnmappedArgumentsObject& argsobj = obj->as(); + + if (JSID_IS_INT(id)) { + /* + * arg can exceed the number of arguments if a script changed the + * prototype to point to another Arguments object with a bigger argc. 
+ */ + unsigned arg = unsigned(JSID_TO_INT(id)); + if (arg < argsobj.initialLength() && !argsobj.isElementDeleted(arg)) { + vp.set(argsobj.element(arg)); + } + } else { + MOZ_ASSERT(JSID_IS_ATOM(id, cx->names().length)); + if (!argsobj.hasOverriddenLength()) { + vp.setInt32(argsobj.initialLength()); + } + } + return true; +} + +static bool UnmappedArgSetter(JSContext* cx, HandleObject obj, HandleId id, + HandleValue v, ObjectOpResult& result) { + if (!obj->is()) { + return result.succeed(); + } + Handle argsobj = obj.as(); + + Rooted desc(cx); + if (!GetOwnPropertyDescriptor(cx, argsobj, id, &desc)) { + return false; + } + MOZ_ASSERT(desc.object()); + unsigned attrs = desc.attributes(); + MOZ_ASSERT(!(attrs & JSPROP_READONLY)); + attrs &= (JSPROP_ENUMERATE | JSPROP_PERMANENT); /* only valid attributes */ + + if (JSID_IS_INT(id)) { + unsigned arg = unsigned(JSID_TO_INT(id)); + if (arg < argsobj->initialLength()) { + argsobj->setElement(arg, v); + return result.succeed(); + } + } else { + MOZ_ASSERT(JSID_IS_ATOM(id, cx->names().length)); + } + + /* + * For simplicity we use delete/define to replace the property with a + * simple data property. Note that we rely on ArgumentsObject::obj_delProperty + * to set the corresponding override-bit. 
+ */ + ObjectOpResult ignored; + return NativeDeleteProperty(cx, argsobj, id, ignored) && + NativeDefineDataProperty(cx, argsobj, id, v, attrs, result); +} + +/* static */ +bool UnmappedArgumentsObject::obj_resolve(JSContext* cx, HandleObject obj, + HandleId id, bool* resolvedp) { + Rooted argsobj(cx, + &obj->as()); + + if (JSID_IS_SYMBOL(id) && + JSID_TO_SYMBOL(id) == cx->wellKnownSymbols().iterator) { + if (argsobj->hasOverriddenIterator()) { + return true; + } + + if (!reifyIterator(cx, argsobj)) { + return false; + } + *resolvedp = true; + return true; + } + + if (JSID_IS_ATOM(id, cx->names().callee)) { + RootedObject throwTypeError( + cx, GlobalObject::getOrCreateThrowTypeError(cx, cx->global())); + if (!throwTypeError) { + return false; + } + + unsigned attrs = + JSPROP_RESOLVING | JSPROP_PERMANENT | JSPROP_GETTER | JSPROP_SETTER; + if (!NativeDefineAccessorProperty(cx, argsobj, id, throwTypeError, + throwTypeError, attrs)) { + return false; + } + + *resolvedp = true; + return true; + } + + unsigned attrs = JSPROP_RESOLVING; + if (JSID_IS_INT(id)) { + uint32_t arg = uint32_t(JSID_TO_INT(id)); + if (arg >= argsobj->initialLength() || argsobj->isElementDeleted(arg)) { + return true; + } + + attrs |= JSPROP_ENUMERATE; + } else if (JSID_IS_ATOM(id, cx->names().length)) { + if (argsobj->hasOverriddenLength()) { + return true; + } + } else { + return true; + } + + if (!NativeDefineAccessorProperty(cx, argsobj, id, UnmappedArgGetter, + UnmappedArgSetter, attrs)) { + return false; + } + + *resolvedp = true; + return true; +} + +/* static */ +bool UnmappedArgumentsObject::obj_enumerate(JSContext* cx, HandleObject obj) { + Rooted argsobj(cx, + &obj->as()); + + RootedId id(cx); + bool found; + + // Trigger reflection. 
+ id = NameToId(cx->names().length); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = NameToId(cx->names().callee); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + id = SYMBOL_TO_JSID(cx->wellKnownSymbols().iterator); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + + for (unsigned i = 0; i < argsobj->initialLength(); i++) { + id = INT_TO_JSID(i); + if (!HasOwnProperty(cx, argsobj, id, &found)) { + return false; + } + } + + return true; +} + +void ArgumentsObject::finalize(JSFreeOp* fop, JSObject* obj) { + MOZ_ASSERT(!IsInsideNursery(obj)); + ArgumentsObject& argsobj = obj->as(); + if (argsobj.data()) { + fop->free_(&argsobj, argsobj.maybeRareData(), + RareArgumentsData::bytesRequired(argsobj.initialLength()), + MemoryUse::RareArgumentsData); + fop->free_(&argsobj, argsobj.data(), + ArgumentsData::bytesRequired(argsobj.data()->numArgs), + MemoryUse::ArgumentsData); + } +} + +void ArgumentsObject::trace(JSTracer* trc, JSObject* obj) { + ArgumentsObject& argsobj = obj->as(); + if (ArgumentsData* data = + argsobj.data()) { // Template objects have no ArgumentsData. 
+ TraceRange(trc, data->numArgs, data->begin(), js_arguments_str); + } +} + +/* static */ +size_t ArgumentsObject::objectMoved(JSObject* dst, JSObject* src) { + ArgumentsObject* ndst = &dst->as(); + const ArgumentsObject* nsrc = &src->as(); + MOZ_ASSERT(ndst->data() == nsrc->data()); + + if (!IsInsideNursery(src)) { + return 0; + } + + Nursery& nursery = dst->runtimeFromMainThread()->gc.nursery(); + + size_t nbytesTotal = 0; + uint32_t nDataBytes = ArgumentsData::bytesRequired(nsrc->data()->numArgs); + if (!nursery.isInside(nsrc->data())) { + nursery.removeMallocedBufferDuringMinorGC(nsrc->data()); + } else { + AutoEnterOOMUnsafeRegion oomUnsafe; + uint8_t* data = nsrc->zone()->pod_malloc(nDataBytes); + if (!data) { + oomUnsafe.crash( + "Failed to allocate ArgumentsObject data while tenuring."); + } + ndst->initFixedSlot(DATA_SLOT, PrivateValue(data)); + + mozilla::PodCopy(data, reinterpret_cast(nsrc->data()), + nDataBytes); + nbytesTotal += nDataBytes; + } + + AddCellMemory(ndst, nDataBytes, MemoryUse::ArgumentsData); + + if (RareArgumentsData* srcRareData = nsrc->maybeRareData()) { + uint32_t nbytes = RareArgumentsData::bytesRequired(nsrc->initialLength()); + if (!nursery.isInside(srcRareData)) { + nursery.removeMallocedBufferDuringMinorGC(srcRareData); + } else { + AutoEnterOOMUnsafeRegion oomUnsafe; + uint8_t* dstRareData = nsrc->zone()->pod_malloc(nbytes); + if (!dstRareData) { + oomUnsafe.crash( + "Failed to allocate RareArgumentsData data while tenuring."); + } + ndst->data()->rareData = (RareArgumentsData*)dstRareData; + + mozilla::PodCopy(dstRareData, reinterpret_cast(srcRareData), + nbytes); + nbytesTotal += nbytes; + } + + AddCellMemory(ndst, nbytes, MemoryUse::RareArgumentsData); + } + + return nbytesTotal; +} + +/* + * The classes below collaborate to lazily reflect and synchronize actual + * argument values, argument count, and callee function object stored in a + * stack frame with their corresponding property values in the frame's + * arguments 
object. + */ +const JSClassOps MappedArgumentsObject::classOps_ = { + nullptr, // addProperty + ArgumentsObject::obj_delProperty, // delProperty + MappedArgumentsObject::obj_enumerate, // enumerate + nullptr, // newEnumerate + MappedArgumentsObject::obj_resolve, // resolve + ArgumentsObject::obj_mayResolve, // mayResolve + ArgumentsObject::finalize, // finalize + nullptr, // call + nullptr, // hasInstance + nullptr, // construct + ArgumentsObject::trace, // trace +}; + +const js::ClassExtension MappedArgumentsObject::classExt_ = { + ArgumentsObject::objectMoved, // objectMovedOp +}; + +const ObjectOps MappedArgumentsObject::objectOps_ = { + nullptr, // lookupProperty + MappedArgumentsObject::obj_defineProperty, // defineProperty + nullptr, // hasProperty + nullptr, // getProperty + nullptr, // setProperty + nullptr, // getOwnPropertyDescriptor + nullptr, // deleteProperty + nullptr, // getElements + nullptr, // funToString +}; + +const JSClass MappedArgumentsObject::class_ = { + "Arguments", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(MappedArgumentsObject::RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_Object) | + JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE, + &MappedArgumentsObject::classOps_, + nullptr, + &MappedArgumentsObject::classExt_, + &MappedArgumentsObject::objectOps_}; + +/* + * Unmapped arguments is significantly less magical than mapped arguments, so + * it is represented by a different class while sharing some functionality. 
+ */ +const JSClassOps UnmappedArgumentsObject::classOps_ = { + nullptr, // addProperty + ArgumentsObject::obj_delProperty, // delProperty + UnmappedArgumentsObject::obj_enumerate, // enumerate + nullptr, // newEnumerate + UnmappedArgumentsObject::obj_resolve, // resolve + ArgumentsObject::obj_mayResolve, // mayResolve + ArgumentsObject::finalize, // finalize + nullptr, // call + nullptr, // hasInstance + nullptr, // construct + ArgumentsObject::trace, // trace +}; + +const js::ClassExtension UnmappedArgumentsObject::classExt_ = { + ArgumentsObject::objectMoved, // objectMovedOp +}; + +const JSClass UnmappedArgumentsObject::class_ = { + "Arguments", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(UnmappedArgumentsObject::RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_Object) | + JSCLASS_SKIP_NURSERY_FINALIZE | JSCLASS_BACKGROUND_FINALIZE, + &UnmappedArgumentsObject::classOps_, nullptr, + &UnmappedArgumentsObject::classExt_}; diff --git a/js/src/vm/ArgumentsObject.h b/js/src/vm/ArgumentsObject.h new file mode 100644 index 0000000000..89c591c46e --- /dev/null +++ b/js/src/vm/ArgumentsObject.h @@ -0,0 +1,496 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArgumentsObject_h +#define vm_ArgumentsObject_h + +#include "mozilla/MemoryReporting.h" + +#include "gc/Barrier.h" +#include "util/BitArray.h" +#include "vm/NativeObject.h" + +namespace js { + +class AbstractFramePtr; +class ArgumentsObject; +class ScriptFrameIter; + +namespace jit { +class JitFrameLayout; +} // namespace jit + +// RareArgumentsData stores the deleted-elements bits for an arguments object. 
+// Because |delete arguments[i]| is uncommon, we allocate this data the first +// time an element is deleted. +class RareArgumentsData { + // Pointer to an array of bits indicating, for every argument in + // [0, initialLength) whether the element has been deleted. See + // ArgumentsObject::isElementDeleted comment. + size_t deletedBits_[1]; + + RareArgumentsData() = default; + RareArgumentsData(const RareArgumentsData&) = delete; + void operator=(const RareArgumentsData&) = delete; + + public: + static RareArgumentsData* create(JSContext* cx, ArgumentsObject* obj); + static size_t bytesRequired(size_t numActuals); + + bool isAnyElementDeleted(size_t len) const { + return IsAnyBitArrayElementSet(deletedBits_, len); + } + bool isElementDeleted(size_t len, size_t i) const { + MOZ_ASSERT(i < len); + return IsBitArrayElementSet(deletedBits_, len, i); + } + void markElementDeleted(size_t len, size_t i) { + MOZ_ASSERT(i < len); + SetBitArrayElement(deletedBits_, len, i); + } +}; + +// ArgumentsData stores the initial indexed arguments provided to a function +// call. It is used to store arguments[i] -- up until the corresponding +// property is modified, when the relevant value is flagged to memorialize the +// modification. +struct ArgumentsData { + /* + * numArgs = std::max(numFormalArgs, numActualArgs) + * The array 'args' has numArgs elements. + */ + uint32_t numArgs; + + RareArgumentsData* rareData; + + /* + * This array holds either the current argument value or the magic + * forwarding value. The latter means that the function has both a + * CallObject and an ArgumentsObject AND the particular formal variable is + * aliased by the CallObject. In such cases, the CallObject holds the + * canonical value so any element access to the arguments object should load + * the value out of the CallObject (which is pointed to by MAYBE_CALL_SLOT). 
+ */ + GCPtrValue args[1]; + + /* For jit use: */ + static ptrdiff_t offsetOfArgs() { return offsetof(ArgumentsData, args); } + + /* Iterate args. */ + GCPtrValue* begin() { return args; } + const GCPtrValue* begin() const { return args; } + GCPtrValue* end() { return args + numArgs; } + const GCPtrValue* end() const { return args + numArgs; } + + static size_t bytesRequired(size_t numArgs) { + return offsetof(ArgumentsData, args) + numArgs * sizeof(Value); + } +}; + +// Maximum supported value of arguments.length. This bounds the +// maximum number of arguments that can be supplied to a spread call +// or Function.prototype.apply. This value also bounds the number of +// elements parsed in an array initializer. NB: keep this in sync +// with the copy in builtin/SelfHostingDefines.h. +static const unsigned ARGS_LENGTH_MAX = 500 * 1000; + +// Maximum number of arguments supported in jitcode. This bounds the +// maximum number of arguments that can be supplied to a spread call +// or Function.prototype.apply without entering the VM. We limit the +// number of parameters we can handle to a number that does not risk +// us allocating too much stack, notably on Windows where there is a +// 4K guard page that has to be touched to extend the stack. The value +// "3000" is the size of the guard page minus an arbitrary, but large, +// safety margin. See bug 1351278. +static const uint32_t JIT_ARGS_LENGTH_MAX = 3000 / sizeof(JS::Value); + +static_assert(JIT_ARGS_LENGTH_MAX <= ARGS_LENGTH_MAX, + "maximum jit arguments should be <= maximum arguments"); + +/* + * [SMDOC] ArgumentsObject + * + * ArgumentsObject instances represent |arguments| objects created to store + * function arguments when a function is called. It's expensive to create such + * objects if they're never used, so they're only created when they are + * potentially used. 
+ * + * Arguments objects are complicated because, for non-strict mode code, they + * must alias any named arguments which were provided to the function. Gnarly + * example: + * + * function f(a, b, c, d) + * { + * arguments[0] = "seta"; + * assertEq(a, "seta"); + * b = "setb"; + * assertEq(arguments[1], "setb"); + * c = "setc"; + * assertEq(arguments[2], undefined); + * arguments[3] = "setd"; + * assertEq(d, undefined); + * } + * f("arga", "argb"); + * + * ES5's strict mode behaves more sanely, and named arguments don't alias + * elements of an arguments object. + * + * ArgumentsObject instances use the following reserved slots: + * + * INITIAL_LENGTH_SLOT + * Stores the initial value of arguments.length, plus a bit indicating + * whether arguments.length and/or arguments[@@iterator] have been + * modified. Use initialLength(), hasOverriddenLength(), and + * hasOverriddenIterator() to access these values. If arguments.length has + * been modified, then the current value of arguments.length is stored in + * another slot associated with a new property. + * DATA_SLOT + * Stores an ArgumentsData*, described above. + * MAYBE_CALL_SLOT + * Stores the CallObject, if the callee has aliased bindings. See + * the ArgumentsData::args comment. + * CALLEE_SLOT + * Stores the initial arguments.callee. This value can be overridden on + * mapped arguments objects, see hasOverriddenCallee. 
+ */ +class ArgumentsObject : public NativeObject { + protected: + static const uint32_t INITIAL_LENGTH_SLOT = 0; + static const uint32_t DATA_SLOT = 1; + static const uint32_t MAYBE_CALL_SLOT = 2; + static const uint32_t CALLEE_SLOT = 3; + + public: + static const uint32_t LENGTH_OVERRIDDEN_BIT = 0x1; + static const uint32_t ITERATOR_OVERRIDDEN_BIT = 0x2; + static const uint32_t ELEMENT_OVERRIDDEN_BIT = 0x4; + static const uint32_t CALLEE_OVERRIDDEN_BIT = 0x8; + static const uint32_t PACKED_BITS_COUNT = 4; + + static_assert(ARGS_LENGTH_MAX <= (UINT32_MAX >> PACKED_BITS_COUNT), + "Max arguments length must fit in available bits"); + + protected: + template + static ArgumentsObject* create(JSContext* cx, HandleFunction callee, + unsigned numActuals, CopyArgs& copy); + + ArgumentsData* data() const { + return reinterpret_cast( + getFixedSlot(DATA_SLOT).toPrivate()); + } + + RareArgumentsData* maybeRareData() const { return data()->rareData; } + + MOZ_MUST_USE bool createRareData(JSContext* cx); + + RareArgumentsData* getOrCreateRareData(JSContext* cx) { + if (!data()->rareData && !createRareData(cx)) { + return nullptr; + } + return data()->rareData; + } + + static bool obj_delProperty(JSContext* cx, HandleObject obj, HandleId id, + ObjectOpResult& result); + + static bool obj_mayResolve(const JSAtomState& names, jsid id, JSObject*); + + public: + static const uint32_t RESERVED_SLOTS = 4; + static const gc::AllocKind FINALIZE_KIND = gc::AllocKind::OBJECT4_BACKGROUND; + + /* Create an arguments object for a frame that is expecting them. */ + static ArgumentsObject* createExpected(JSContext* cx, AbstractFramePtr frame); + + /* + * Purposefully disconnect the returned arguments object from the frame + * by always creating a new copy that does not alias formal parameters. + * This allows function-local analysis to determine that formals are + * not aliased and generally simplifies arguments objects. 
+ */ + static ArgumentsObject* createUnexpected(JSContext* cx, + ScriptFrameIter& iter); + static ArgumentsObject* createUnexpected(JSContext* cx, + AbstractFramePtr frame); + static ArgumentsObject* createForIon(JSContext* cx, + jit::JitFrameLayout* frame, + HandleObject scopeChain); + + /* + * Allocate ArgumentsData and fill reserved slots after allocating an + * ArgumentsObject in Ion code. + */ + static ArgumentsObject* finishForIonPure(JSContext* cx, + jit::JitFrameLayout* frame, + JSObject* scopeChain, + ArgumentsObject* obj); + + static ArgumentsObject* createTemplateObject(JSContext* cx, bool mapped); + + /* + * Return the initial length of the arguments. This may differ from the + * current value of arguments.length! + */ + uint32_t initialLength() const { + uint32_t argc = uint32_t(getFixedSlot(INITIAL_LENGTH_SLOT).toInt32()) >> + PACKED_BITS_COUNT; + MOZ_ASSERT(argc <= ARGS_LENGTH_MAX); + return argc; + } + + /* True iff arguments.length has been assigned or its attributes changed. */ + bool hasOverriddenLength() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & LENGTH_OVERRIDDEN_BIT; + } + + void markLengthOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | LENGTH_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + /* + * Create the default "length" property and set LENGTH_OVERRIDDEN_BIT. + */ + static bool reifyLength(JSContext* cx, Handle obj); + + /* True iff arguments[@@iterator] has been assigned or its attributes + * changed. */ + bool hasOverriddenIterator() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & ITERATOR_OVERRIDDEN_BIT; + } + + void markIteratorOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ITERATOR_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + /* + * Create the default @@iterator property and set ITERATOR_OVERRIDDEN_BIT. 
+ */ + static bool reifyIterator(JSContext* cx, Handle obj); + + /* + * Return the arguments iterator function. + */ + static bool getArgumentsIterator(JSContext* cx, MutableHandleValue val); + + /* True iff any element has been assigned or its attributes + * changed. */ + bool hasOverriddenElement() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & ELEMENT_OVERRIDDEN_BIT; + } + + void markElementOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | ELEMENT_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + /* + * Because the arguments object is a real object, its elements may be + * deleted. This is implemented by setting a 'deleted' flag for the arg + * which is read by argument object resolve and getter/setter hooks. + * + * NB: an element, once deleted, stays deleted. Thus: + * + * function f(x) { delete arguments[0]; arguments[0] = 42; return x } + * assertEq(f(1), 1); + * + * This works because, once a property is deleted from an arguments object, + * it gets regular properties with regular getters/setters that don't alias + * ArgumentsData::slots. + */ + bool isElementDeleted(uint32_t i) const { + MOZ_ASSERT(i < data()->numArgs); + if (i >= initialLength()) { + return false; + } + return maybeRareData() && + maybeRareData()->isElementDeleted(initialLength(), i); + } + + bool isAnyElementDeleted() const { + return maybeRareData() && + maybeRareData()->isAnyElementDeleted(initialLength()); + } + + bool markElementDeleted(JSContext* cx, uint32_t i); + + /* + * An ArgumentsObject serves two roles: + * - a real object, accessed through regular object operations, e.g.., + * GetElement corresponding to 'arguments[i]'; + * - a VM-internal data structure, storing the value of arguments (formal + * and actual) that are accessed directly by the VM when a reading the + * value of a formal parameter. 
+ * There are two ways to access the ArgumentsData::args corresponding to + * these two use cases: + * - object access should use elements(i) which will take care of + * forwarding when the value is the magic forwarding value; + * - VM argument access should use arg(i) which will assert that the + * value is not the magic forwarding value (since, if such forwarding was + * needed, the frontend should have emitted JSOp::GetAliasedVar). + */ + const Value& element(uint32_t i) const; + + inline void setElement(uint32_t i, const Value& v); + + const Value& arg(unsigned i) const { + MOZ_ASSERT(i < data()->numArgs); + const Value& v = data()->args[i]; + MOZ_ASSERT(!v.isMagic()); + return v; + } + + void setArg(unsigned i, const Value& v) { + MOZ_ASSERT(i < data()->numArgs); + GCPtrValue& lhs = data()->args[i]; + MOZ_ASSERT(!lhs.isMagic()); + lhs = v; + } + + /* + * Test if an argument is forwarded, i.e. its actual value is stored in the + * CallObject and can't be directly read from |ArgumentsData::args|. + */ + bool argIsForwarded(unsigned i) const { + MOZ_ASSERT(i < data()->numArgs); + const Value& v = data()->args[i]; + return IsMagicScopeSlotValue(v); + } + + /* + * Attempt to speedily and efficiently access the i-th element of this + * arguments object. Return true if the element was speedily returned. + * Return false if the element must be looked up more slowly using + * getProperty or some similar method. The second overload copies the + * elements [start, start + count) into the locations starting at 'vp'. + * + * NB: Returning false does not indicate error! + */ + bool maybeGetElement(uint32_t i, MutableHandleValue vp) { + if (i >= initialLength() || isElementDeleted(i)) { + return false; + } + vp.set(element(i)); + return true; + } + + inline bool maybeGetElements(uint32_t start, uint32_t count, js::Value* vp); + + /* + * Measures things hanging off this ArgumentsObject that are counted by the + * |miscSize| argument in JSObject::sizeOfExcludingThis(). 
+ */ + size_t sizeOfMisc(mozilla::MallocSizeOf mallocSizeOf) const { + if (!data()) { // Template arguments objects have no data. + return 0; + } + return mallocSizeOf(data()) + mallocSizeOf(maybeRareData()); + } + size_t sizeOfData() const { + return ArgumentsData::bytesRequired(data()->numArgs) + + (maybeRareData() ? RareArgumentsData::bytesRequired(initialLength()) + : 0); + } + + static void finalize(JSFreeOp* fop, JSObject* obj); + static void trace(JSTracer* trc, JSObject* obj); + static size_t objectMoved(JSObject* dst, JSObject* src); + + /* For jit use: */ + static size_t getDataSlotOffset() { return getFixedSlotOffset(DATA_SLOT); } + static size_t getInitialLengthSlotOffset() { + return getFixedSlotOffset(INITIAL_LENGTH_SLOT); + } + + static Value MagicEnvSlotValue(uint32_t slot) { + // When forwarding slots to a backing CallObject, the slot numbers are + // stored as uint32 magic values. This raises an ambiguity if we have + // also copied JS_OPTIMIZED_OUT magic from a JIT frame or + // JS_UNINITIALIZED_LEXICAL magic on the CallObject. To distinguish + // normal magic values (those with a JSWhyMagic) and uint32 magic + // values, we add the maximum JSWhyMagic value to the slot + // number. This is safe as ARGS_LENGTH_MAX is well below UINT32_MAX. 
+ static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX); + return JS::MagicValueUint32(slot + JS_WHY_MAGIC_COUNT); + } + static uint32_t SlotFromMagicScopeSlotValue(const Value& v) { + static_assert(UINT32_MAX - JS_WHY_MAGIC_COUNT > ARGS_LENGTH_MAX); + return v.magicUint32() - JS_WHY_MAGIC_COUNT; + } + static bool IsMagicScopeSlotValue(const Value& v) { + return v.isMagic() && v.magicUint32() > JS_WHY_MAGIC_COUNT; + } + + static void MaybeForwardToCallObject(AbstractFramePtr frame, + ArgumentsObject* obj, + ArgumentsData* data); + static void MaybeForwardToCallObject(jit::JitFrameLayout* frame, + HandleObject callObj, + ArgumentsObject* obj, + ArgumentsData* data); +}; + +class MappedArgumentsObject : public ArgumentsObject { + static const JSClassOps classOps_; + static const ClassExtension classExt_; + static const ObjectOps objectOps_; + + public: + static const JSClass class_; + + JSFunction& callee() const { + return getFixedSlot(CALLEE_SLOT).toObject().as(); + } + + bool hasOverriddenCallee() const { + const Value& v = getFixedSlot(INITIAL_LENGTH_SLOT); + return v.toInt32() & CALLEE_OVERRIDDEN_BIT; + } + + void markCalleeOverridden() { + uint32_t v = + getFixedSlot(INITIAL_LENGTH_SLOT).toInt32() | CALLEE_OVERRIDDEN_BIT; + setFixedSlot(INITIAL_LENGTH_SLOT, Int32Value(v)); + } + + private: + static bool obj_enumerate(JSContext* cx, HandleObject obj); + static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id, + bool* resolvedp); + static bool obj_defineProperty(JSContext* cx, HandleObject obj, HandleId id, + Handle desc, + ObjectOpResult& result); +}; + +class UnmappedArgumentsObject : public ArgumentsObject { + static const JSClassOps classOps_; + static const ClassExtension classExt_; + + public: + static const JSClass class_; + + private: + static bool obj_enumerate(JSContext* cx, HandleObject obj); + static bool obj_resolve(JSContext* cx, HandleObject obj, HandleId id, + bool* resolvedp); +}; + +} // namespace js + +template <> 
+inline bool JSObject::is() const { + return is() || is(); +} + +#endif /* vm_ArgumentsObject_h */ diff --git a/js/src/vm/ArrayBufferObject-inl.h b/js/src/vm/ArrayBufferObject-inl.h new file mode 100644 index 0000000000..4181fb2f49 --- /dev/null +++ b/js/src/vm/ArrayBufferObject-inl.h @@ -0,0 +1,81 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayBufferObject_inl_h +#define vm_ArrayBufferObject_inl_h + +// Utilities and common inline code for ArrayBufferObject and +// SharedArrayBufferObject. + +#include "vm/ArrayBufferObject.h" + +#include "js/Value.h" + +#include "vm/SharedArrayObject.h" +#include "vm/SharedMem.h" + +namespace js { + +inline SharedMem ArrayBufferObjectMaybeShared::dataPointerEither() { + ArrayBufferObjectMaybeShared* buf = this; + if (buf->is()) { + return buf->as().dataPointerShared(); + } + return buf->as().dataPointerShared(); +} + +inline bool ArrayBufferObjectMaybeShared::isDetached() const { + if (this->is()) { + return this->as().isDetached(); + } + return false; +} + +inline BufferSize AnyArrayBufferByteLength( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().byteLength(); + } + return buf->as().byteLength(); +} + +inline BufferSize ArrayBufferObjectMaybeShared::byteLength() const { + return AnyArrayBufferByteLength(this); +} + +inline bool AnyArrayBufferIsPreparedForAsmJS( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().isPreparedForAsmJS(); + } + return buf->as().isPreparedForAsmJS(); +} + +inline bool ArrayBufferObjectMaybeShared::isPreparedForAsmJS() const { + return AnyArrayBufferIsPreparedForAsmJS(this); +} + +inline bool AnyArrayBufferIsWasm(const 
ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().isWasm(); + } + return buf->as().isWasm(); +} + +inline bool ArrayBufferObjectMaybeShared::isWasm() const { + return AnyArrayBufferIsWasm(this); +} + +inline ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val) { + if (val.toObject().is()) { + return val.toObject().as(); + } + return val.toObject().as(); +} + +} // namespace js + +#endif // vm_ArrayBufferObject_inl_h diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp new file mode 100644 index 0000000000..8a89baddf1 --- /dev/null +++ b/js/src/vm/ArrayBufferObject.cpp @@ -0,0 +1,1904 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/ArrayBufferObject-inl.h" +#include "vm/ArrayBufferObject.h" + +#include "mozilla/Alignment.h" +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/CheckedInt.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/Likely.h" +#include "mozilla/Maybe.h" +#include "mozilla/PodOperations.h" +#include "mozilla/ScopeExit.h" +#include "mozilla/TaggedAnonymousMemory.h" + +#include // std::max, std::min +#include // std::uninitialized_copy_n +#include +#ifndef XP_WIN +# include +#endif +#include // std::tuple +#ifdef MOZ_VALGRIND +# include +#endif + +#include "jsapi.h" +#include "jsfriendapi.h" +#include "jsnum.h" +#include "jstypes.h" + +#include "builtin/Array.h" +#include "builtin/DataViewObject.h" +#include "gc/Barrier.h" +#include "gc/Memory.h" +#include "js/ArrayBuffer.h" +#include "js/Conversions.h" +#include "js/experimental/TypedData.h" // JS_IsArrayBufferViewObject +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include 
"js/MemoryMetrics.h" +#include "js/PropertySpec.h" +#include "js/SharedArrayBuffer.h" +#include "js/Wrapper.h" +#include "util/Windows.h" +#include "vm/GlobalObject.h" +#include "vm/Interpreter.h" +#include "vm/JSContext.h" +#include "vm/JSObject.h" +#include "vm/SharedArrayObject.h" +#include "vm/Warnings.h" // js::WarnNumberASCII +#include "vm/WrapperObject.h" +#include "wasm/WasmSignalHandlers.h" +#include "wasm/WasmTypes.h" + +#include "gc/FreeOp-inl.h" +#include "gc/Marking-inl.h" +#include "gc/Nursery-inl.h" +#include "vm/JSAtom-inl.h" +#include "vm/NativeObject-inl.h" +#include "vm/Realm-inl.h" // js::AutoRealm +#include "vm/Shape-inl.h" + +using JS::ToInt32; + +using mozilla::Atomic; +using mozilla::CheckedInt; +using mozilla::Maybe; +using mozilla::Nothing; +using mozilla::Some; +using mozilla::Unused; + +using namespace js; + +// If there are too many wasm memory buffers (typically 6GB each) live we run up +// against system resource exhaustion (address space or number of memory map +// descriptors), see bug 1068684, bug 1073934, bug 1517412, bug 1502733 for +// details. The limiting case seems to be Android on ARM64, where the +// per-process address space is limited to 4TB (39 bits) by the organization of +// the page tables. An earlier problem was Windows Vista Home 64-bit, where the +// per-process address space is limited to 8TB (40 bits). +// +// Thus we track the number of live objects if we are using large mappings, and +// set a limit of the number of live buffer objects per process. We trigger GC +// work when we approach the limit and we throw an OOM error if the per-process +// limit is exceeded. The limit (MaximumLiveMappedBuffers) is specific to +// architecture, OS, and OS configuration. +// +// Since the MaximumLiveMappedBuffers limit is not generally accounted for by +// any existing GC-trigger heuristics, we need an extra heuristic for triggering +// GCs when the caller is allocating memories rapidly without other garbage. 
+// Thus, once the live buffer count crosses the threshold +// StartTriggeringAtLiveBufferCount, we start triggering GCs every +// AllocatedBuffersPerTrigger allocations. Once we reach +// StartSyncFullGCAtLiveBufferCount live buffers, we perform expensive +// non-incremental full GCs as a last-ditch effort to avoid unnecessary failure. +// Once we reach MaximumLiveMappedBuffers, we perform further full GCs before +// giving up. + +#if defined(JS_CODEGEN_ARM64) && defined(ANDROID) +// With 6GB mappings, the hard limit is 84 buffers. 75 cuts it close. +static const int32_t MaximumLiveMappedBuffers = 75; +#elif defined(MOZ_TSAN) || defined(MOZ_ASAN) +// ASAN and TSAN use a ton of vmem for bookkeeping leaving a lot less for the +// program so use a lower limit. +static const int32_t MaximumLiveMappedBuffers = 500; +#else +static const int32_t MaximumLiveMappedBuffers = 1000; +#endif + +// StartTriggeringAtLiveBufferCount + AllocatedBuffersPerTrigger must be well +// below StartSyncFullGCAtLiveBufferCount in order to provide enough time for +// incremental GC to do its job. + +#if defined(JS_CODEGEN_ARM64) && defined(ANDROID) +static const int32_t StartTriggeringAtLiveBufferCount = 15; +static const int32_t StartSyncFullGCAtLiveBufferCount = + MaximumLiveMappedBuffers - 15; +static const int32_t AllocatedBuffersPerTrigger = 15; +#else +static const int32_t StartTriggeringAtLiveBufferCount = 100; +static const int32_t StartSyncFullGCAtLiveBufferCount = + MaximumLiveMappedBuffers - 100; +static const int32_t AllocatedBuffersPerTrigger = 100; +#endif + +static Atomic liveBufferCount(0); +static Atomic allocatedSinceLastTrigger(0); + +int32_t js::LiveMappedBufferCount() { return liveBufferCount; } + +bool js::ArrayBufferObject::supportLargeBuffers = false; + +static MOZ_MUST_USE bool CheckArrayBufferTooLarge(JSContext* cx, + uint64_t nbytes) { + // Refuse to allocate too large buffers. 
+ if (MOZ_UNLIKELY(nbytes > ArrayBufferObject::maxBufferByteLength())) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BAD_ARRAY_LENGTH); + return false; + } + + return true; +} + +void* js::MapBufferMemory(size_t mappedSize, size_t initialCommittedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(initialCommittedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(initialCommittedSize <= mappedSize); + + auto decrement = mozilla::MakeScopeExit([&] { liveBufferCount--; }); + if (wasm::IsHugeMemoryEnabled()) { + liveBufferCount++; + } else { + decrement.release(); + } + + // Test >= to guard against the case where multiple extant runtimes + // race to allocate. + if (liveBufferCount >= MaximumLiveMappedBuffers) { + if (OnLargeAllocationFailure) { + OnLargeAllocationFailure(); + } + if (liveBufferCount >= MaximumLiveMappedBuffers) { + return nullptr; + } + } + +#ifdef XP_WIN + void* data = VirtualAlloc(nullptr, mappedSize, MEM_RESERVE, PAGE_NOACCESS); + if (!data) { + return nullptr; + } + + if (!VirtualAlloc(data, initialCommittedSize, MEM_COMMIT, PAGE_READWRITE)) { + VirtualFree(data, 0, MEM_RELEASE); + return nullptr; + } +#else // XP_WIN + void* data = + MozTaggedAnonymousMmap(nullptr, mappedSize, PROT_NONE, + MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved"); + if (data == MAP_FAILED) { + return nullptr; + } + + // Note we will waste a page on zero-sized memories here + if (mprotect(data, initialCommittedSize, PROT_READ | PROT_WRITE)) { + munmap(data, mappedSize); + return nullptr; + } +#endif // !XP_WIN + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE( + (unsigned char*)data + initialCommittedSize, + mappedSize - initialCommittedSize); +#endif + + decrement.release(); + return data; +} + +bool js::CommitBufferMemory(void* dataEnd, size_t delta) { + MOZ_ASSERT(delta); + MOZ_ASSERT(delta % gc::SystemPageSize() == 0); + +#ifdef XP_WIN + 
if (!VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE)) { + return false; + } +#else // XP_WIN + if (mprotect(dataEnd, delta, PROT_READ | PROT_WRITE)) { + return false; + } +#endif // !XP_WIN + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta); +#endif + + return true; +} + +bool js::ExtendBufferMapping(void* dataPointer, size_t mappedSize, + size_t newMappedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(newMappedSize % gc::SystemPageSize() == 0); + MOZ_ASSERT(newMappedSize >= mappedSize); + +#ifdef XP_WIN + void* mappedEnd = (char*)dataPointer + mappedSize; + uint32_t delta = newMappedSize - mappedSize; + if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS)) { + return false; + } + return true; +#elif defined(XP_LINUX) + // Note this will not move memory (no MREMAP_MAYMOVE specified) + if (MAP_FAILED == mremap(dataPointer, mappedSize, newMappedSize, 0)) { + return false; + } + return true; +#else + // No mechanism for remapping on MacOS and other Unices. Luckily + // shouldn't need it here as most of these are 64-bit. + return false; +#endif +} + +void js::UnmapBufferMemory(void* base, size_t mappedSize) { + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + +#ifdef XP_WIN + VirtualFree(base, 0, MEM_RELEASE); +#else // XP_WIN + munmap(base, mappedSize); +#endif // !XP_WIN + +#if defined(MOZ_VALGRIND) && \ + defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE) + VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)base, + mappedSize); +#endif + + if (wasm::IsHugeMemoryEnabled()) { + // Decrement the buffer counter at the end -- otherwise, a race condition + // could enable the creation of unlimited buffers. + --liveBufferCount; + } +} + +/* + * ArrayBufferObject + * + * This class holds the underlying raw buffer that the TypedArrayObject classes + * access. 
It can be created explicitly and passed to a TypedArrayObject, or + * can be created implicitly by constructing a TypedArrayObject with a size. + */ + +/* + * ArrayBufferObject (base) + */ + +static const JSClassOps ArrayBufferObjectClassOps = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + ArrayBufferObject::finalize, // finalize + nullptr, // call + nullptr, // hasInstance + nullptr, // construct + nullptr, // trace +}; + +static const JSFunctionSpec arraybuffer_functions[] = { + JS_FN("isView", ArrayBufferObject::fun_isView, 1, 0), JS_FS_END}; + +static const JSPropertySpec arraybuffer_properties[] = { + JS_SELF_HOSTED_SYM_GET(species, "$ArrayBufferSpecies", 0), JS_PS_END}; + +static const JSFunctionSpec arraybuffer_proto_functions[] = { + JS_SELF_HOSTED_FN("slice", "ArrayBufferSlice", 2, 0), JS_FS_END}; + +static const JSPropertySpec arraybuffer_proto_properties[] = { + JS_PSG("byteLength", ArrayBufferObject::byteLengthGetter, 0), + JS_STRING_SYM_PS(toStringTag, "ArrayBuffer", JSPROP_READONLY), JS_PS_END}; + +static const ClassSpec ArrayBufferObjectClassSpec = { + GenericCreateConstructor, + GenericCreatePrototype, + arraybuffer_functions, + arraybuffer_properties, + arraybuffer_proto_functions, + arraybuffer_proto_properties}; + +static const ClassExtension ArrayBufferObjectClassExtension = { + ArrayBufferObject::objectMoved, // objectMovedOp +}; + +const JSClass ArrayBufferObject::class_ = { + "ArrayBuffer", + JSCLASS_DELAY_METADATA_BUILDER | + JSCLASS_HAS_RESERVED_SLOTS(RESERVED_SLOTS) | + JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer) | + JSCLASS_BACKGROUND_FINALIZE, + &ArrayBufferObjectClassOps, &ArrayBufferObjectClassSpec, + &ArrayBufferObjectClassExtension}; + +const JSClass ArrayBufferObject::protoClass_ = { + "ArrayBuffer.prototype", JSCLASS_HAS_CACHED_PROTO(JSProto_ArrayBuffer), + JS_NULL_CLASS_OPS, &ArrayBufferObjectClassSpec}; + +bool 
js::IsArrayBuffer(HandleValue v) { + return v.isObject() && v.toObject().is(); +} + +bool js::IsArrayBuffer(JSObject* obj) { return obj->is(); } + +ArrayBufferObject& js::AsArrayBuffer(JSObject* obj) { + MOZ_ASSERT(IsArrayBuffer(obj)); + return obj->as(); +} + +bool js::IsArrayBufferMaybeShared(HandleValue v) { + return v.isObject() && v.toObject().is(); +} + +bool js::IsArrayBufferMaybeShared(JSObject* obj) { + return obj->is(); +} + +ArrayBufferObjectMaybeShared& js::AsArrayBufferMaybeShared(JSObject* obj) { + MOZ_ASSERT(IsArrayBufferMaybeShared(obj)); + return obj->as(); +} + +MOZ_ALWAYS_INLINE bool ArrayBufferObject::byteLengthGetterImpl( + JSContext* cx, const CallArgs& args) { + MOZ_ASSERT(IsArrayBuffer(args.thisv())); + auto* buffer = &args.thisv().toObject().as(); + args.rval().setNumber(buffer->byteLength().get()); + return true; +} + +bool ArrayBufferObject::byteLengthGetter(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return CallNonGenericMethod(cx, args); +} + +/* + * ArrayBuffer.isView(obj); ES6 (Dec 2013 draft) 24.1.3.1 + */ +bool ArrayBufferObject::fun_isView(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + args.rval().setBoolean(args.get(0).isObject() && + JS_IsArrayBufferViewObject(&args.get(0).toObject())); + return true; +} + +// ES2017 draft 24.1.2.1 +bool ArrayBufferObject::class_constructor(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 1. + if (!ThrowIfNotConstructing(cx, args, "ArrayBuffer")) { + return false; + } + + // Step 2. + uint64_t byteLength; + if (!ToIndex(cx, args.get(0), &byteLength)) { + return false; + } + + // Step 3 (Inlined 24.1.1.1 AllocateArrayBuffer). + // 24.1.1.1, step 1 (Inlined 9.1.14 OrdinaryCreateFromConstructor). 
+ RootedObject proto(cx); + if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_ArrayBuffer, + &proto)) { + return false; + } + + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). + if (!CheckArrayBufferTooLarge(cx, byteLength)) { + return false; + } + + // 24.1.1.1, steps 1 and 4-6. + JSObject* bufobj = createZeroed(cx, BufferSize(byteLength), proto); + if (!bufobj) { + return false; + } + args.rval().setObject(*bufobj); + return true; +} + +using ArrayBufferContents = UniquePtr; + +static ArrayBufferContents AllocateUninitializedArrayBufferContents( + JSContext* cx, BufferSize nbytes) { + // First attempt a normal allocation. + uint8_t* p = cx->maybe_pod_arena_malloc(js::ArrayBufferContentsArena, + nbytes.get()); + if (MOZ_UNLIKELY(!p)) { + // Otherwise attempt a large allocation, calling the + // large-allocation-failure callback if necessary. + p = static_cast(cx->runtime()->onOutOfMemoryCanGC( + js::AllocFunction::Malloc, js::ArrayBufferContentsArena, nbytes.get())); + if (!p) { + ReportOutOfMemory(cx); + } + } + + return ArrayBufferContents(p); +} + +static ArrayBufferContents AllocateArrayBufferContents(JSContext* cx, + BufferSize nbytes) { + // First attempt a normal allocation. + uint8_t* p = cx->maybe_pod_arena_calloc(js::ArrayBufferContentsArena, + nbytes.get()); + if (MOZ_UNLIKELY(!p)) { + // Otherwise attempt a large allocation, calling the + // large-allocation-failure callback if necessary. 
+ p = static_cast(cx->runtime()->onOutOfMemoryCanGC( + js::AllocFunction::Calloc, js::ArrayBufferContentsArena, nbytes.get())); + if (!p) { + ReportOutOfMemory(cx); + } + } + + return ArrayBufferContents(p); +} + +static ArrayBufferContents NewCopiedBufferContents( + JSContext* cx, Handle buffer) { + ArrayBufferContents dataCopy = + AllocateUninitializedArrayBufferContents(cx, buffer->byteLength()); + if (dataCopy) { + if (auto count = buffer->byteLength().get()) { + memcpy(dataCopy.get(), buffer->dataPointer(), count); + } + } + return dataCopy; +} + +/* static */ +void ArrayBufferObject::detach(JSContext* cx, + Handle buffer) { + cx->check(buffer); + MOZ_ASSERT(!buffer->isPreparedForAsmJS()); + MOZ_ASSERT(!buffer->hasTypedObjectViews()); + + // Update all views of the buffer to account for the buffer having been + // detached, and clear the buffer's data and list of views. + // + // Typed object buffers are not exposed and cannot be detached. + + auto& innerViews = ObjectRealm::get(buffer).innerViews.get(); + if (InnerViewTable::ViewVector* views = + innerViews.maybeViewsUnbarriered(buffer)) { + for (size_t i = 0; i < views->length(); i++) { + JSObject* view = (*views)[i]; + view->as().notifyBufferDetached(); + } + innerViews.removeViews(buffer); + } + if (JSObject* view = buffer->firstView()) { + view->as().notifyBufferDetached(); + buffer->setFirstView(nullptr); + } + + if (buffer->dataPointer()) { + buffer->releaseData(cx->runtime()->defaultFreeOp()); + buffer->setDataPointer(BufferContents::createNoData()); + } + + buffer->setByteLength(BufferSize(0)); + buffer->setIsDetached(); +} + +/* + * [SMDOC] WASM Linear Memory structure + * + * Wasm Raw Buf Linear Memory Structure + * + * The linear heap in Wasm is an mmaped array buffer. Several + * constants manage its lifetime: + * + * - length - the wasm-visible current length of the buffer. Accesses in the + * range [0, length] succeed. May only increase. 
+ * + * - boundsCheckLimit - the size against which we perform bounds checks. It is + * always a constant offset smaller than mappedSize. Currently that constant + * offset is 64k (wasm::GuardSize). + * + * - maxSize - the optional declared limit on how much length can grow. + * + * - mappedSize - the actual mmaped size. Access in the range + * [0, mappedSize] will either succeed, or be handled by the wasm signal + * handlers. + * + * The below diagram shows the layout of the wasm heap. The wasm-visible + * portion of the heap starts at 0. There is one extra page prior to the + * start of the wasm heap which contains the WasmArrayRawBuffer struct at + * its end (i.e. right before the start of the WASM heap). + * + * WasmArrayRawBuffer + * \ ArrayBufferObject::dataPointer() + * \ / + * \ | + * ______|_|____________________________________________________________ + * |______|_|______________|___________________|____________|____________| + * 0 length maxSize boundsCheckLimit mappedSize + * + * \_______________________/ + * COMMITED + * \____________________________________________/ + * SLOP + * \_____________________________________________________________________/ + * MAPPED + * + * Invariants: + * - length only increases + * - 0 <= length <= maxSize (if present) <= boundsCheckLimit <= mappedSize + * - on ARM boundsCheckLimit must be a valid ARM immediate. + * - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They + * are otherwise constant. + * + * NOTE: For asm.js on non-x64 we guarantee that + * + * length == maxSize == boundsCheckLimit == mappedSize + * + * That is, signal handlers will not be invoked, since they cannot emulate + * asm.js accesses on non-x64 architectures. + * + * The region between length and mappedSize is the SLOP - an area where we use + * signal handlers to catch things that slip by bounds checks. 
Logically it has + * two parts: + * + * - from length to boundsCheckLimit - this part of the SLOP serves to catch + * accesses to memory we have reserved but not yet grown into. This allows us + * to grow memory up to max (when present) without having to patch/update the + * bounds checks. + * + * - from boundsCheckLimit to mappedSize - this part of the SLOP allows us to + * bounds check against base pointers and fold some constant offsets inside + * loads. This enables better Bounds Check Elimination. + * + */ + +MOZ_MUST_USE bool WasmArrayRawBuffer::growToSizeInPlace(BufferSize oldSize, + BufferSize newSize) { + MOZ_ASSERT(newSize.get() >= oldSize.get()); + MOZ_ASSERT_IF(maxSize(), newSize.get() <= maxSize().value()); + MOZ_ASSERT(newSize.get() <= mappedSize()); + + size_t delta = newSize.get() - oldSize.get(); + MOZ_ASSERT(delta % wasm::PageSize == 0); + + uint8_t* dataEnd = dataPointer() + oldSize.get(); + MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0); + + if (delta && !CommitBufferMemory(dataEnd, delta)) { + return false; + } + + length_ = newSize; + + return true; +} + +bool WasmArrayRawBuffer::extendMappedSize(uint64_t maxSize) { + size_t newMappedSize = wasm::ComputeMappedSize(maxSize); + MOZ_ASSERT(mappedSize_ <= newMappedSize); + if (mappedSize_ == newMappedSize) { + return true; + } + + if (!ExtendBufferMapping(dataPointer(), mappedSize_, newMappedSize)) { + return false; + } + + mappedSize_ = newMappedSize; + return true; +} + +void WasmArrayRawBuffer::tryGrowMaxSizeInPlace(uint64_t deltaMaxSize) { + CheckedInt newMaxSize = maxSize_.value(); + newMaxSize += deltaMaxSize; + MOZ_ASSERT(newMaxSize.isValid()); + MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0); + + if (!extendMappedSize(newMaxSize.value())) { + return; + } + + maxSize_ = Some(newMaxSize.value()); +} + +/* static */ +WasmArrayRawBuffer* WasmArrayRawBuffer::Allocate(BufferSize numBytes, + const Maybe& maxSize, + const Maybe& mapped) { + size_t mappedSize = + mapped.isSome() + 
? *mapped + : wasm::ComputeMappedSize(maxSize.valueOr(numBytes.get())); + + MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize()); + MOZ_RELEASE_ASSERT(numBytes.get() <= SIZE_MAX - gc::SystemPageSize()); + MOZ_RELEASE_ASSERT(numBytes.get() <= maxSize.valueOr(UINT32_MAX)); + MOZ_ASSERT(numBytes.get() % gc::SystemPageSize() == 0); + MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0); + + uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize(); + uint64_t numBytesWithHeader = numBytes.get() + gc::SystemPageSize(); + + void* data = + MapBufferMemory((size_t)mappedSizeWithHeader, (size_t)numBytesWithHeader); + if (!data) { + return nullptr; + } + + uint8_t* base = reinterpret_cast(data) + gc::SystemPageSize(); + uint8_t* header = base - sizeof(WasmArrayRawBuffer); + + auto rawBuf = + new (header) WasmArrayRawBuffer(base, maxSize, mappedSize, numBytes); + return rawBuf; +} + +/* static */ +void WasmArrayRawBuffer::Release(void* mem) { + WasmArrayRawBuffer* header = + (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer)); + + MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize()); + size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize(); + + UnmapBufferMemory(header->basePointer(), mappedSizeWithHeader); +} + +WasmArrayRawBuffer* ArrayBufferObject::BufferContents::wasmBuffer() const { + MOZ_RELEASE_ASSERT(kind_ == WASM); + return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer)); +} + +template +static bool CreateSpecificWasmBuffer( + JSContext* cx, uint32_t initialSize, const Maybe& maxSize, + wasm::MemoryKind memKind, + MutableHandleArrayBufferObjectMaybeShared maybeSharedObject) { + bool useHugeMemory = wasm::IsHugeMemoryEnabled(); + + MOZ_RELEASE_ASSERT(memKind == wasm::MemoryKind::Memory32); + + Maybe clampedMaxSize = maxSize; + if (clampedMaxSize) { +#ifdef JS_64BIT + // On 64-bit platforms when we aren't using huge memory, clamp + // clampedMaxSize to a smaller value that satisfies 
the 32-bit invariants + // clampedMaxSize + wasm::PageSize < UINT32_MAX and clampedMaxSize % + // wasm::PageSize == 0 + if (!useHugeMemory && + clampedMaxSize.value() >= (UINT32_MAX - wasm::PageSize)) { + uint64_t clamp = (wasm::MaxMemory32LimitField - 2) * wasm::PageSize; + MOZ_ASSERT(clamp < UINT32_MAX); + MOZ_ASSERT(initialSize <= clamp); + clampedMaxSize = Some(clamp); + } +#else + static_assert(sizeof(uintptr_t) == 4, "assuming not 64 bit implies 32 bit"); + + // On 32-bit platforms, prevent applications specifying a large max + // (like UINT32_MAX) from unintentially OOMing the browser: they just + // want "a lot of memory". Maintain the invariant that + // initialSize <= clampedMaxSize. + static const uint64_t OneGiB = 1 << 30; + static_assert(wasm::HighestValidARMImmediate > OneGiB, + "computing mapped size on ARM requires clamped max size"); + uint64_t clamp = std::max(OneGiB, uint64_t(initialSize)); + clampedMaxSize = Some(std::min(clamp, *clampedMaxSize)); +#endif + } + + Maybe mappedSize; + +#ifdef WASM_SUPPORTS_HUGE_MEMORY + if (useHugeMemory) { + mappedSize = Some(wasm::HugeMappedSize); + } +#endif + + RawbufT* buffer = + RawbufT::Allocate(BufferSize(initialSize), clampedMaxSize, mappedSize); + if (!buffer) { + if (useHugeMemory) { + WarnNumberASCII(cx, JSMSG_WASM_HUGE_MEMORY_FAILED); + if (cx->isExceptionPending()) { + cx->clearPendingException(); + } + + ReportOutOfMemory(cx); + return false; + } + + // If we fail, and have a clampedMaxSize, try to reserve the biggest chunk + // in the range [initialSize, clampedMaxSize) using log backoff. 
+ if (!clampedMaxSize) { + wasm::Log(cx, "new Memory({initial=%" PRIu32 " bytes}) failed", + initialSize); + ReportOutOfMemory(cx); + return false; + } + + uint64_t cur = clampedMaxSize.value() / 2; + + for (; cur > initialSize; cur /= 2) { + uint64_t clampedMaxSize = RoundUp(cur, wasm::PageSize); + buffer = RawbufT::Allocate(BufferSize(initialSize), Some(clampedMaxSize), + mappedSize); + if (buffer) { + break; + } + } + + if (!buffer) { + wasm::Log(cx, "new Memory({initial=%" PRIu32 " bytes}) failed", + initialSize); + ReportOutOfMemory(cx); + return false; + } + + // Try to grow our chunk as much as possible. + for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2) { + buffer->tryGrowMaxSizeInPlace(RoundUp(d, wasm::PageSize)); + } + } + + // ObjT::createFromNewRawBuffer assumes ownership of |buffer| even in case + // of failure. + RootedArrayBufferObjectMaybeShared object( + cx, ObjT::createFromNewRawBuffer(cx, buffer, BufferSize(initialSize))); + if (!object) { + return false; + } + + maybeSharedObject.set(object); + + // See MaximumLiveMappedBuffers comment above. 
+ if (liveBufferCount > StartSyncFullGCAtLiveBufferCount) { + JS::PrepareForFullGC(cx); + JS::NonIncrementalGC(cx, GC_NORMAL, JS::GCReason::TOO_MUCH_WASM_MEMORY); + allocatedSinceLastTrigger = 0; + } else if (liveBufferCount > StartTriggeringAtLiveBufferCount) { + allocatedSinceLastTrigger++; + if (allocatedSinceLastTrigger > AllocatedBuffersPerTrigger) { + Unused << cx->runtime()->gc.triggerGC(JS::GCReason::TOO_MUCH_WASM_MEMORY); + allocatedSinceLastTrigger = 0; + } + } else { + allocatedSinceLastTrigger = 0; + } + + if (clampedMaxSize) { + if (useHugeMemory) { + wasm::Log(cx, + "new Memory({initial:%" PRIu32 " bytes, maximum:%" PRIu64 + " bytes}) succeeded", + initialSize, *clampedMaxSize); + } else { + wasm::Log(cx, + "new Memory({initial:%" PRIu32 " bytes, maximum:%" PRIu64 + " bytes}) succeeded " + "with internal maximum of %" PRIu64, + initialSize, *clampedMaxSize, object->wasmMaxSize().value()); + } + } else { + wasm::Log(cx, "new Memory({initial:%" PRIu32 " bytes}) succeeded", + initialSize); + } + + return true; +} + +bool js::CreateWasmBuffer(JSContext* cx, wasm::MemoryKind memKind, + const wasm::Limits& memory, + MutableHandleArrayBufferObjectMaybeShared buffer) { + MOZ_ASSERT(memory.initial % wasm::PageSize == 0); + MOZ_RELEASE_ASSERT(cx->wasm().haveSignalHandlers); + MOZ_RELEASE_ASSERT(memory.initial <= + ArrayBufferObject::maxBufferByteLength()); + + if (memory.shared == wasm::Shareable::True) { + if (!cx->realm()->creationOptions().getSharedMemoryAndAtomicsEnabled()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_SHMEM_LINK); + return false; + } + return CreateSpecificWasmBuffer( + cx, uint32_t(memory.initial), memory.maximum, memKind, buffer); + } + return CreateSpecificWasmBuffer( + cx, uint32_t(memory.initial), memory.maximum, memKind, buffer); +} + +bool ArrayBufferObject::prepareForAsmJS() { + MOZ_ASSERT(byteLength().get() % wasm::PageSize == 0, + "prior size checking should have guaranteed page-size multiple"); + 
MOZ_ASSERT(byteLength().get() > 0, + "prior size checking should have excluded empty buffers"); + + switch (bufferKind()) { + case MALLOCED: + case MAPPED: + case EXTERNAL: + // It's okay if this uselessly sets the flag a second time. + setIsPreparedForAsmJS(); + return true; + + case INLINE_DATA: + static_assert(wasm::PageSize > MaxInlineBytes, + "inline data must be too small to be a page size multiple"); + MOZ_ASSERT_UNREACHABLE( + "inline-data buffers should be implicitly excluded by size checks"); + return false; + + case NO_DATA: + MOZ_ASSERT_UNREACHABLE( + "size checking should have excluded detached or empty buffers"); + return false; + + // asm.js code and associated buffers are potentially long-lived. Yet a + // buffer of user-owned data *must* be detached by the user before the + // user-owned data is disposed. No caller wants to use a user-owned + // ArrayBuffer with asm.js, so just don't support this and avoid a mess of + // complexity. + case USER_OWNED: + // wasm buffers can be detached at any time. 
+ case WASM: + MOZ_ASSERT(!isPreparedForAsmJS()); + return false; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("invalid bufferKind() encountered"); + return false; + } + + MOZ_ASSERT_UNREACHABLE("non-exhaustive kind-handling switch?"); + return false; +} + +ArrayBufferObject::BufferContents ArrayBufferObject::createMappedContents( + int fd, size_t offset, size_t length) { + void* data = + gc::AllocateMappedContent(fd, offset, length, ARRAY_BUFFER_ALIGNMENT); + return BufferContents::createMapped(data); +} + +uint8_t* ArrayBufferObject::inlineDataPointer() const { + return static_cast(fixedData(JSCLASS_RESERVED_SLOTS(&class_))); +} + +uint8_t* ArrayBufferObject::dataPointer() const { + return static_cast(getFixedSlot(DATA_SLOT).toPrivate()); +} + +SharedMem ArrayBufferObject::dataPointerShared() const { + return SharedMem::unshared(getFixedSlot(DATA_SLOT).toPrivate()); +} + +ArrayBufferObject::FreeInfo* ArrayBufferObject::freeInfo() const { + MOZ_ASSERT(isExternal()); + return reinterpret_cast(inlineDataPointer()); +} + +void ArrayBufferObject::releaseData(JSFreeOp* fop) { + switch (bufferKind()) { + case INLINE_DATA: + // Inline data doesn't require releasing. + break; + case MALLOCED: + fop->free_(this, dataPointer(), byteLength().get(), + MemoryUse::ArrayBufferContents); + break; + case NO_DATA: + // There's nothing to release if there's no data. + MOZ_ASSERT(dataPointer() == nullptr); + break; + case USER_OWNED: + // User-owned data is released by, well, the user. + break; + case MAPPED: + gc::DeallocateMappedContent(dataPointer(), byteLength().get()); + fop->removeCellMemory(this, associatedBytes(), + MemoryUse::ArrayBufferContents); + break; + case WASM: + WasmArrayRawBuffer::Release(dataPointer()); + fop->removeCellMemory(this, byteLength().get(), + MemoryUse::ArrayBufferContents); + break; + case EXTERNAL: + if (freeInfo()->freeFunc) { + // The analyzer can't know for sure whether the embedder-supplied + // free function will GC. We give the analyzer a hint here. 
+ // (Doing a GC in the free function is considered a programmer + // error.) + JS::AutoSuppressGCAnalysis nogc; + freeInfo()->freeFunc(dataPointer(), freeInfo()->freeUserData); + } + break; + case BAD1: + MOZ_CRASH("invalid BufferKind encountered"); + break; + } +} + +void ArrayBufferObject::setDataPointer(BufferContents contents) { + setFixedSlot(DATA_SLOT, PrivateValue(contents.data())); + setFlags((flags() & ~KIND_MASK) | contents.kind()); + + if (isExternal()) { + auto info = freeInfo(); + info->freeFunc = contents.freeFunc(); + info->freeUserData = contents.freeUserData(); + } +} + +BufferSize ArrayBufferObject::byteLength() const { + return BufferSize(size_t(getFixedSlot(BYTE_LENGTH_SLOT).toPrivate())); +} + +inline size_t ArrayBufferObject::associatedBytes() const { + if (bufferKind() == MALLOCED) { + return byteLength().get(); + } + if (bufferKind() == MAPPED) { + return RoundUp(byteLength().get(), js::gc::SystemPageSize()); + } + MOZ_CRASH("Unexpected buffer kind"); +} + +void ArrayBufferObject::setByteLength(BufferSize length) { + MOZ_ASSERT(length.get() <= maxBufferByteLength()); + setFixedSlot(BYTE_LENGTH_SLOT, PrivateValue(length.get())); +} + +size_t ArrayBufferObject::wasmMappedSize() const { + if (isWasm()) { + return contents().wasmBuffer()->mappedSize(); + } + return byteLength().deprecatedGetUint32(); +} + +size_t js::WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmMappedSize(); + } + return buf->as().wasmMappedSize(); +} + +Maybe ArrayBufferObject::wasmMaxSize() const { + if (isWasm()) { + return contents().wasmBuffer()->maxSize(); + } + return Some(byteLength().deprecatedGetUint32()); +} + +Maybe js::WasmArrayBufferMaxSize( + const ArrayBufferObjectMaybeShared* buf) { + if (buf->is()) { + return buf->as().wasmMaxSize(); + } + return buf->as().wasmMaxSize(); +} + +static void CheckStealPreconditions(Handle buffer, + JSContext* cx) { + cx->check(buffer); + + 
MOZ_ASSERT(!buffer->isDetached(), "can't steal from a detached buffer"); + MOZ_ASSERT(!buffer->isPreparedForAsmJS(), + "asm.js-prepared buffers don't have detachable/stealable data"); + MOZ_ASSERT(!buffer->hasTypedObjectViews(), + "buffers for typed objects don't have detachable/stealable data"); +} + +/* static */ +bool ArrayBufferObject::wasmGrowToSizeInPlace( + BufferSize newSize, HandleArrayBufferObject oldBuf, + MutableHandleArrayBufferObject newBuf, JSContext* cx) { + CheckStealPreconditions(oldBuf, cx); + + MOZ_ASSERT(oldBuf->isWasm()); + + // On failure, do not throw and ensure that the original buffer is + // unmodified and valid. After WasmArrayRawBuffer::growToSizeInPlace(), the + // wasm-visible length of the buffer has been increased so it must be the + // last fallible operation. + + // Note, caller must guard on limit appropriate for the memory type + if (newSize.get() > ArrayBufferObject::maxBufferByteLength()) { + return false; + } + + newBuf.set(ArrayBufferObject::createEmpty(cx)); + if (!newBuf) { + cx->clearPendingException(); + return false; + } + + MOZ_ASSERT(newBuf->isNoData()); + + if (!oldBuf->contents().wasmBuffer()->growToSizeInPlace(oldBuf->byteLength(), + newSize)) { + return false; + } + + // Extract the grown contents from |oldBuf|. + BufferContents oldContents = oldBuf->contents(); + + // Overwrite |oldBuf|'s data pointer *without* releasing old data. + oldBuf->setDataPointer(BufferContents::createNoData()); + + // Detach |oldBuf| now that doing so won't release |oldContents|. + RemoveCellMemory(oldBuf, oldBuf->byteLength().get(), + MemoryUse::ArrayBufferContents); + ArrayBufferObject::detach(cx, oldBuf); + + // Set |newBuf|'s contents to |oldBuf|'s original contents. 
+ newBuf->initialize(newSize, oldContents); + AddCellMemory(newBuf, newSize.get(), MemoryUse::ArrayBufferContents); + + return true; +} + +/* static */ +bool ArrayBufferObject::wasmMovingGrowToSize( + BufferSize newSize, HandleArrayBufferObject oldBuf, + MutableHandleArrayBufferObject newBuf, JSContext* cx) { + // On failure, do not throw and ensure that the original buffer is + // unmodified and valid. + + // Note, caller must guard on the limit appropriate to the memory type + if (newSize.get() > ArrayBufferObject::maxBufferByteLength()) { + return false; + } + + if (wasm::ComputeMappedSize(newSize.get()) <= oldBuf->wasmMappedSize() || + oldBuf->contents().wasmBuffer()->extendMappedSize(newSize.get())) { + return wasmGrowToSizeInPlace(newSize, oldBuf, newBuf, cx); + } + + newBuf.set(ArrayBufferObject::createEmpty(cx)); + if (!newBuf) { + cx->clearPendingException(); + return false; + } + + WasmArrayRawBuffer* newRawBuf = + WasmArrayRawBuffer::Allocate(newSize, Nothing(), Nothing()); + if (!newRawBuf) { + return false; + } + + AddCellMemory(newBuf, newSize.get(), MemoryUse::ArrayBufferContents); + + BufferContents contents = + BufferContents::createWasm(newRawBuf->dataPointer()); + newBuf->initialize(BufferSize(newSize), contents); + + memcpy(newBuf->dataPointer(), oldBuf->dataPointer(), + oldBuf->byteLength().get()); + ArrayBufferObject::detach(cx, oldBuf); + return true; +} + +uint32_t ArrayBufferObject::flags() const { + return uint32_t(getFixedSlot(FLAGS_SLOT).toInt32()); +} + +void ArrayBufferObject::setFlags(uint32_t flags) { + setFixedSlot(FLAGS_SLOT, Int32Value(flags)); +} + +static inline js::gc::AllocKind GetArrayBufferGCObjectKind(size_t numSlots) { + if (numSlots <= 4) { + return js::gc::AllocKind::ARRAYBUFFER4; + } + if (numSlots <= 8) { + return js::gc::AllocKind::ARRAYBUFFER8; + } + if (numSlots <= 12) { + return js::gc::AllocKind::ARRAYBUFFER12; + } + return js::gc::AllocKind::ARRAYBUFFER16; +} + +ArrayBufferObject* 
ArrayBufferObject::createForContents( + JSContext* cx, BufferSize nbytes, BufferContents contents) { + MOZ_ASSERT(contents); + MOZ_ASSERT(contents.kind() != INLINE_DATA); + MOZ_ASSERT(contents.kind() != NO_DATA); + MOZ_ASSERT(contents.kind() != WASM); + + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). + if (!CheckArrayBufferTooLarge(cx, nbytes.get())) { + return nullptr; + } + + // Some |contents| kinds need to store extra data in the ArrayBuffer beyond a + // data pointer. If needed for the particular kind, add extra fixed slots to + // the ArrayBuffer for use as raw storage to store such information. + size_t reservedSlots = JSCLASS_RESERVED_SLOTS(&class_); + + size_t nAllocated = 0; + size_t nslots = reservedSlots; + if (contents.kind() == USER_OWNED) { + // No accounting to do in this case. + } else if (contents.kind() == EXTERNAL) { + // Store the FreeInfo in the inline data slots so that we + // don't use up slots for it in non-refcounted array buffers. + size_t freeInfoSlots = HowMany(sizeof(FreeInfo), sizeof(Value)); + MOZ_ASSERT(reservedSlots + freeInfoSlots <= NativeObject::MAX_FIXED_SLOTS, + "FreeInfo must fit in inline slots"); + nslots += freeInfoSlots; + } else { + // The ABO is taking ownership, so account the bytes against the zone. 
+ nAllocated = nbytes.get(); + if (contents.kind() == MAPPED) { + nAllocated = RoundUp(nbytes.get(), js::gc::SystemPageSize()); + } else { + MOZ_ASSERT(contents.kind() == MALLOCED, + "should have handled all possible callers' kinds"); + } + } + + MOZ_ASSERT(!(class_.flags & JSCLASS_HAS_PRIVATE)); + gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots); + + AutoSetNewObjectMetadata metadata(cx); + Rooted buffer( + cx, NewObjectWithClassProto(cx, nullptr, allocKind, + TenuredObject)); + if (!buffer) { + return nullptr; + } + + MOZ_ASSERT(!gc::IsInsideNursery(buffer), + "ArrayBufferObject has a finalizer that must be called to not " + "leak in some cases, so it can't be nursery-allocated"); + + buffer->initialize(nbytes, contents); + + if (contents.kind() == MAPPED || contents.kind() == MALLOCED) { + AddCellMemory(buffer, nAllocated, MemoryUse::ArrayBufferContents); + } + + return buffer; +} + +template +/* static */ std::tuple +ArrayBufferObject::createBufferAndData( + JSContext* cx, BufferSize nbytes, AutoSetNewObjectMetadata&, + JS::Handle proto /* = nullptr */) { + MOZ_ASSERT(nbytes.get() <= ArrayBufferObject::maxBufferByteLength(), + "caller must validate the byte count it passes"); + + // Try fitting the data inline with the object by repurposing fixed-slot + // storage. Add extra fixed slots if necessary to accomplish this, but don't + // exceed the maximum number of fixed slots! + size_t nslots = JSCLASS_RESERVED_SLOTS(&class_); + ArrayBufferContents data; + if (nbytes.get() <= MaxInlineBytes) { + int newSlots = HowMany(nbytes.get(), sizeof(Value)); + MOZ_ASSERT(int(nbytes.get()) <= newSlots * int(sizeof(Value))); + + nslots += newSlots; + } else { + data = FillType == FillContents::Uninitialized + ? 
AllocateUninitializedArrayBufferContents(cx, nbytes) + : AllocateArrayBufferContents(cx, nbytes); + if (!data) { + return {nullptr, nullptr}; + } + } + + MOZ_ASSERT(!(class_.flags & JSCLASS_HAS_PRIVATE)); + gc::AllocKind allocKind = GetArrayBufferGCObjectKind(nslots); + + ArrayBufferObject* buffer = NewObjectWithClassProto( + cx, proto, allocKind, GenericObject); + if (!buffer) { + return {nullptr, nullptr}; + } + + MOZ_ASSERT(!gc::IsInsideNursery(buffer), + "ArrayBufferObject has a finalizer that must be called to not " + "leak in some cases, so it can't be nursery-allocated"); + + uint8_t* toFill; + if (data) { + toFill = data.release(); + buffer->initialize(nbytes, BufferContents::createMalloced(toFill)); + AddCellMemory(buffer, nbytes.get(), MemoryUse::ArrayBufferContents); + } else { + toFill = + static_cast(buffer->initializeToInlineData(nbytes.get())); + if constexpr (FillType == FillContents::Zero) { + memset(toFill, 0, nbytes.get()); + } + } + + return {buffer, toFill}; +} + +/* static */ ArrayBufferObject* ArrayBufferObject::copy( + JSContext* cx, JS::Handle unwrappedArrayBuffer) { + if (unwrappedArrayBuffer->isDetached()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TYPED_ARRAY_DETACHED); + return nullptr; + } + + BufferSize nbytes = unwrappedArrayBuffer->byteLength(); + + AutoSetNewObjectMetadata metadata(cx); + auto [buffer, toFill] = createBufferAndData( + cx, nbytes, metadata, nullptr); + if (!buffer) { + return nullptr; + } + + std::uninitialized_copy_n(unwrappedArrayBuffer->dataPointer(), nbytes.get(), + toFill); + return buffer; +} + +ArrayBufferObject* ArrayBufferObject::createZeroed( + JSContext* cx, BufferSize nbytes, HandleObject proto /* = nullptr */) { + // 24.1.1.1, step 3 (Inlined 6.2.6.1 CreateByteDataBlock, step 2). 
+ if (!CheckArrayBufferTooLarge(cx, nbytes.get())) { + return nullptr; + } + + AutoSetNewObjectMetadata metadata(cx); + auto [buffer, toFill] = + createBufferAndData(cx, nbytes, metadata, proto); + Unused << toFill; + return buffer; +} + +ArrayBufferObject* ArrayBufferObject::createForTypedObject(JSContext* cx, + BufferSize nbytes) { + ArrayBufferObject* buffer = createZeroed(cx, nbytes); + if (buffer) { + buffer->setHasTypedObjectViews(); + } + return buffer; +} + +ArrayBufferObject* ArrayBufferObject::createEmpty(JSContext* cx) { + AutoSetNewObjectMetadata metadata(cx); + ArrayBufferObject* obj = NewBuiltinClassInstance(cx); + if (!obj) { + return nullptr; + } + + obj->initialize(BufferSize(0), BufferContents::createNoData()); + return obj; +} + +ArrayBufferObject* ArrayBufferObject::createFromNewRawBuffer( + JSContext* cx, WasmArrayRawBuffer* rawBuffer, BufferSize initialSize) { + AutoSetNewObjectMetadata metadata(cx); + ArrayBufferObject* buffer = NewBuiltinClassInstance(cx); + if (!buffer) { + WasmArrayRawBuffer::Release(rawBuffer->dataPointer()); + return nullptr; + } + + MOZ_ASSERT(initialSize.get() == rawBuffer->byteLength().get()); + + buffer->setByteLength(initialSize); + buffer->setFlags(0); + buffer->setFirstView(nullptr); + + auto contents = BufferContents::createWasm(rawBuffer->dataPointer()); + buffer->setDataPointer(contents); + + AddCellMemory(buffer, initialSize.get(), MemoryUse::ArrayBufferContents); + + return buffer; +} + +/* static */ uint8_t* ArrayBufferObject::stealMallocedContents( + JSContext* cx, Handle buffer) { + CheckStealPreconditions(buffer, cx); + + switch (buffer->bufferKind()) { + case MALLOCED: { + uint8_t* stolenData = buffer->dataPointer(); + MOZ_ASSERT(stolenData); + + RemoveCellMemory(buffer, buffer->byteLength().get(), + MemoryUse::ArrayBufferContents); + + // Overwrite the old data pointer *without* releasing the contents + // being stolen. 
+ buffer->setDataPointer(BufferContents::createNoData()); + + // Detach |buffer| now that doing so won't free |stolenData|. + ArrayBufferObject::detach(cx, buffer); + return stolenData; + } + + case INLINE_DATA: + case NO_DATA: + case USER_OWNED: + case MAPPED: + case EXTERNAL: { + // We can't use these data types directly. Make a copy to return. + ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer); + if (!copiedData) { + return nullptr; + } + + // Detach |buffer|. This immediately releases the currently owned + // contents, freeing or unmapping data in the MAPPED and EXTERNAL cases. + ArrayBufferObject::detach(cx, buffer); + return copiedData.release(); + } + + case WASM: + MOZ_ASSERT_UNREACHABLE( + "wasm buffers aren't stealable except by a " + "memory.grow operation that shouldn't call this " + "function"); + return nullptr; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data"); + return nullptr; + } + + MOZ_ASSERT_UNREACHABLE("garbage kind computed"); + return nullptr; +} + +/* static */ ArrayBufferObject::BufferContents +ArrayBufferObject::extractStructuredCloneContents( + JSContext* cx, Handle buffer) { + CheckStealPreconditions(buffer, cx); + + BufferContents contents = buffer->contents(); + + switch (contents.kind()) { + case INLINE_DATA: + case NO_DATA: + case USER_OWNED: { + ArrayBufferContents copiedData = NewCopiedBufferContents(cx, buffer); + if (!copiedData) { + return BufferContents::createFailed(); + } + + ArrayBufferObject::detach(cx, buffer); + return BufferContents::createMalloced(copiedData.release()); + } + + case MALLOCED: + case MAPPED: { + MOZ_ASSERT(contents); + + RemoveCellMemory(buffer, buffer->associatedBytes(), + MemoryUse::ArrayBufferContents); + + // Overwrite the old data pointer *without* releasing old data. + buffer->setDataPointer(BufferContents::createNoData()); + + // Detach |buffer| now that doing so won't release |oldContents|. 
+ ArrayBufferObject::detach(cx, buffer); + return contents; + } + + case WASM: + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return BufferContents::createFailed(); + + case EXTERNAL: + MOZ_ASSERT_UNREACHABLE( + "external ArrayBuffer shouldn't have passed the " + "structured-clone preflighting"); + break; + + case BAD1: + MOZ_ASSERT_UNREACHABLE("bad kind when stealing malloc'd data"); + break; + } + + MOZ_ASSERT_UNREACHABLE("garbage kind computed"); + return BufferContents::createFailed(); +} + +/* static */ +void ArrayBufferObject::addSizeOfExcludingThis( + JSObject* obj, mozilla::MallocSizeOf mallocSizeOf, JS::ClassInfo* info) { + ArrayBufferObject& buffer = AsArrayBuffer(obj); + switch (buffer.bufferKind()) { + case INLINE_DATA: + // Inline data's size should be reported by this object's size-class + // reporting. + break; + case MALLOCED: + if (buffer.isPreparedForAsmJS()) { + info->objectsMallocHeapElementsAsmJS += + mallocSizeOf(buffer.dataPointer()); + } else { + info->objectsMallocHeapElementsNormal += + mallocSizeOf(buffer.dataPointer()); + } + break; + case NO_DATA: + // No data is no memory. + MOZ_ASSERT(buffer.dataPointer() == nullptr); + break; + case USER_OWNED: + // User-owned data should be accounted for by the user. 
+ break; + case MAPPED: + info->objectsNonHeapElementsNormal += buffer.byteLength().get(); + break; + case WASM: + info->objectsNonHeapElementsWasm += buffer.byteLength().get(); + MOZ_ASSERT(buffer.wasmMappedSize() >= buffer.byteLength().get()); + info->wasmGuardPages += + buffer.wasmMappedSize() - buffer.byteLength().get(); + break; + case EXTERNAL: + MOZ_CRASH("external buffers not currently supported"); + break; + case BAD1: + MOZ_CRASH("bad bufferKind()"); + } +} + +/* static */ +void ArrayBufferObject::finalize(JSFreeOp* fop, JSObject* obj) { + obj->as().releaseData(fop); +} + +/* static */ +void ArrayBufferObject::copyData(Handle toBuffer, + size_t toIndex, + Handle fromBuffer, + size_t fromIndex, size_t count) { + MOZ_ASSERT(toBuffer->byteLength().get() >= count); + MOZ_ASSERT(toBuffer->byteLength().get() >= toIndex + count); + MOZ_ASSERT(fromBuffer->byteLength().get() >= fromIndex); + MOZ_ASSERT(fromBuffer->byteLength().get() >= fromIndex + count); + + memcpy(toBuffer->dataPointer() + toIndex, + fromBuffer->dataPointer() + fromIndex, count); +} + +/* static */ +size_t ArrayBufferObject::objectMoved(JSObject* obj, JSObject* old) { + ArrayBufferObject& dst = obj->as(); + const ArrayBufferObject& src = old->as(); + + // Fix up possible inline data pointer. + if (src.hasInlineData()) { + dst.setFixedSlot(DATA_SLOT, PrivateValue(dst.inlineDataPointer())); + } + + return 0; +} + +JSObject* ArrayBufferObject::firstView() { + return getFixedSlot(FIRST_VIEW_SLOT).isObject() + ? 
&getFixedSlot(FIRST_VIEW_SLOT).toObject() + : nullptr; +} + +void ArrayBufferObject::setFirstView(ArrayBufferViewObject* view) { + setFixedSlot(FIRST_VIEW_SLOT, ObjectOrNullValue(view)); +} + +bool ArrayBufferObject::addView(JSContext* cx, ArrayBufferViewObject* view) { + if (!firstView()) { + setFirstView(view); + return true; + } + + return ObjectRealm::get(this).innerViews.get().addView(cx, this, view); +} + +/* + * InnerViewTable + */ + +constexpr size_t VIEW_LIST_MAX_LENGTH = 500; + +bool InnerViewTable::addView(JSContext* cx, ArrayBufferObject* buffer, + JSObject* view) { + // ArrayBufferObject entries are only added when there are multiple views. + MOZ_ASSERT(buffer->firstView()); + + Map::AddPtr p = map.lookupForAdd(buffer); + + MOZ_ASSERT(!gc::IsInsideNursery(buffer)); + bool addToNursery = nurseryKeysValid && gc::IsInsideNursery(view); + + if (p) { + ViewVector& views = p->value(); + MOZ_ASSERT(!views.empty()); + + if (addToNursery) { + // Only add the entry to |nurseryKeys| if it isn't already there. + if (views.length() >= VIEW_LIST_MAX_LENGTH) { + // To avoid quadratic blowup, skip the loop below if we end up + // adding enormous numbers of views for the same object. + nurseryKeysValid = false; + } else { + for (size_t i = 0; i < views.length(); i++) { + if (gc::IsInsideNursery(views[i])) { + addToNursery = false; + break; + } + } + } + } + + if (!views.append(view)) { + ReportOutOfMemory(cx); + return false; + } + } else { + if (!map.add(p, buffer, ViewVector(cx->zone()))) { + ReportOutOfMemory(cx); + return false; + } + // ViewVector has one inline element, so the first insertion is + // guaranteed to succeed. 
+ MOZ_ALWAYS_TRUE(p->value().append(view)); + } + + if (addToNursery && !nurseryKeys.append(buffer)) { + nurseryKeysValid = false; + } + + return true; +} + +InnerViewTable::ViewVector* InnerViewTable::maybeViewsUnbarriered( + ArrayBufferObject* buffer) { + Map::Ptr p = map.lookup(buffer); + if (p) { + return &p->value(); + } + return nullptr; +} + +void InnerViewTable::removeViews(ArrayBufferObject* buffer) { + Map::Ptr p = map.lookup(buffer); + MOZ_ASSERT(p); + + map.remove(p); +} + +/* static */ +bool InnerViewTable::sweepEntry(JSObject** pkey, ViewVector& views) { + if (IsAboutToBeFinalizedUnbarriered(pkey)) { + return true; + } + + MOZ_ASSERT(!views.empty()); + size_t i = 0; + while (i < views.length()) { + if (IsAboutToBeFinalizedUnbarriered(&views[i])) { + // If the current element is garbage then remove it from the + // vector by moving the last one into its place. + views[i] = views.back(); + views.popBack(); + } else { + i++; + } + } + + return views.empty(); +} + +void InnerViewTable::sweep() { map.sweep(); } + +void InnerViewTable::sweepAfterMinorGC() { + MOZ_ASSERT(needsSweepAfterMinorGC()); + + if (nurseryKeysValid) { + for (size_t i = 0; i < nurseryKeys.length(); i++) { + JSObject* buffer = MaybeForwarded(nurseryKeys[i]); + Map::Ptr p = map.lookup(buffer); + if (!p) { + continue; + } + + if (sweepEntry(&p->mutableKey(), p->value())) { + map.remove(buffer); + } + } + nurseryKeys.clear(); + } else { + // Do the required sweeping by looking at every map entry. 
+ nurseryKeys.clear(); + sweep(); + + nurseryKeysValid = true; + } +} + +size_t InnerViewTable::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) { + size_t vectorSize = 0; + for (Map::Enum e(map); !e.empty(); e.popFront()) { + vectorSize += e.front().value().sizeOfExcludingThis(mallocSizeOf); + } + + return vectorSize + map.shallowSizeOfExcludingThis(mallocSizeOf) + + nurseryKeys.sizeOfExcludingThis(mallocSizeOf); +} + +template <> +bool JSObject::is() const { + return is() || is(); +} + +JS_FRIEND_API uint32_t JS::GetArrayBufferByteLength(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapAs(); + return aobj ? aobj->byteLength().deprecatedGetUint32() : 0; +} + +JS_FRIEND_API uint8_t* JS::GetArrayBufferData(JSObject* obj, + bool* isSharedMemory, + const JS::AutoRequireNoGC&) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return nullptr; + } + *isSharedMemory = false; + return aobj->dataPointer(); +} + +static ArrayBufferObject* UnwrapArrayBuffer( + JSContext* cx, JS::Handle maybeArrayBuffer) { + JSObject* obj = CheckedUnwrapStatic(maybeArrayBuffer); + if (!obj) { + ReportAccessDenied(cx); + return nullptr; + } + + if (!obj->is()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_ARRAYBUFFER_REQUIRED); + return nullptr; + } + + return &obj->as(); +} + +JS_FRIEND_API bool JS::DetachArrayBuffer(JSContext* cx, HandleObject obj) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedBuffer(cx, UnwrapArrayBuffer(cx, obj)); + if (!unwrappedBuffer) { + return false; + } + + if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return false; + } + + AutoRealm ar(cx, unwrappedBuffer); + ArrayBufferObject::detach(cx, unwrappedBuffer); + return true; +} + +JS_FRIEND_API bool JS::IsDetachedArrayBufferObject(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + 
return false; + } + + return aobj->isDetached(); +} + +JS_FRIEND_API JSObject* JS::NewArrayBuffer(JSContext* cx, uint32_t nbytes) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + return ArrayBufferObject::createZeroed(cx, BufferSize(nbytes)); +} + +JS_PUBLIC_API JSObject* JS::NewArrayBufferWithContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + MOZ_ASSERT_IF(!data, nbytes == 0); + + if (!data) { + // Don't pass nulled contents to |createForContents|. + return ArrayBufferObject::createZeroed(cx, BufferSize(0)); + } + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createMalloced(data); + return ArrayBufferObject::createForContents(cx, BufferSize(nbytes), contents); +} + +JS_PUBLIC_API JSObject* JS::CopyArrayBuffer(JSContext* cx, + Handle arrayBuffer) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(arrayBuffer != nullptr); + + Rooted unwrappedSource( + cx, UnwrapArrayBuffer(cx, arrayBuffer)); + if (!unwrappedSource) { + return nullptr; + } + + return ArrayBufferObject::copy(cx, unwrappedSource); +} + +JS_PUBLIC_API JSObject* JS::NewExternalArrayBuffer( + JSContext* cx, size_t nbytes, void* data, + JS::BufferContentsFreeFunc freeFunc, void* freeUserData) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + MOZ_ASSERT(nbytes > 0); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = + BufferContents::createExternal(data, freeFunc, freeUserData); + return ArrayBufferObject::createForContents(cx, BufferSize(nbytes), contents); +} + +JS_PUBLIC_API JSObject* JS::NewArrayBufferWithUserOwnedContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createUserOwned(data); + return ArrayBufferObject::createForContents(cx, BufferSize(nbytes), 
contents); +} + +JS_FRIEND_API bool JS::IsArrayBufferObject(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_FRIEND_API bool JS::ArrayBufferHasData(JSObject* obj) { + return !obj->unwrapAs().isDetached(); +} + +JS_FRIEND_API JSObject* JS::UnwrapArrayBuffer(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_FRIEND_API JSObject* JS::UnwrapSharedArrayBuffer(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API void* JS::StealArrayBufferContents(JSContext* cx, + HandleObject obj) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedBuffer(cx, UnwrapArrayBuffer(cx, obj)); + if (!unwrappedBuffer) { + return nullptr; + } + + if (unwrappedBuffer->isDetached()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_TYPED_ARRAY_DETACHED); + return nullptr; + } + + if (unwrappedBuffer->isWasm() || unwrappedBuffer->isPreparedForAsmJS()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_WASM_NO_TRANSFER); + return nullptr; + } + + AutoRealm ar(cx, unwrappedBuffer); + return ArrayBufferObject::stealMallocedContents(cx, unwrappedBuffer); +} + +JS_PUBLIC_API JSObject* JS::NewMappedArrayBufferWithContents(JSContext* cx, + size_t nbytes, + void* data) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + + MOZ_ASSERT(data); + + using BufferContents = ArrayBufferObject::BufferContents; + + BufferContents contents = BufferContents::createMapped(data); + return ArrayBufferObject::createForContents(cx, BufferSize(nbytes), contents); +} + +JS_PUBLIC_API void* JS::CreateMappedArrayBufferContents(int fd, size_t offset, + size_t length) { + return ArrayBufferObject::createMappedContents(fd, offset, length).data(); +} + +JS_PUBLIC_API void JS::ReleaseMappedArrayBufferContents(void* contents, + size_t length) { + gc::DeallocateMappedContent(contents, length); +} + +JS_FRIEND_API bool JS::IsMappedArrayBufferObject(JSObject* obj) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return false; + } + 
+ return aobj->isMapped(); +} + +JS_FRIEND_API JSObject* JS::GetObjectAsArrayBuffer(JSObject* obj, + uint32_t* length, + uint8_t** data) { + ArrayBufferObject* aobj = obj->maybeUnwrapIf(); + if (!aobj) { + return nullptr; + } + + *length = aobj->byteLength().deprecatedGetUint32(); + *data = aobj->dataPointer(); + + return aobj; +} + +JS_FRIEND_API void JS::GetArrayBufferLengthAndData(JSObject* obj, + uint32_t* length, + bool* isSharedMemory, + uint8_t** data) { + MOZ_ASSERT(IsArrayBuffer(obj)); + *length = AsArrayBuffer(obj).byteLength().deprecatedGetUint32(); + *data = AsArrayBuffer(obj).dataPointer(); + *isSharedMemory = false; +} diff --git a/js/src/vm/ArrayBufferObject.h b/js/src/vm/ArrayBufferObject.h new file mode 100644 index 0000000000..3b68610e5b --- /dev/null +++ b/js/src/vm/ArrayBufferObject.h @@ -0,0 +1,659 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayBufferObject_h +#define vm_ArrayBufferObject_h + +#include "mozilla/Maybe.h" + +#include // std::tuple + +#include "builtin/TypedArrayConstants.h" +#include "gc/Memory.h" +#include "gc/ZoneAllocator.h" +#include "js/ArrayBuffer.h" +#include "js/GCHashTable.h" +#include "vm/JSObject.h" +#include "vm/Runtime.h" +#include "vm/SharedMem.h" +#include "wasm/WasmTypes.h" + +namespace js { + +class ArrayBufferViewObject; +class WasmArrayRawBuffer; + +// Create a new mapping of size `mappedSize` with an initially committed prefix +// of size `initialCommittedSize`. Both arguments denote bytes and must be +// multiples of the page size, with `initialCommittedSize` <= `mappedSize`. +// Returns nullptr on failure. 
+void* MapBufferMemory(size_t mappedSize, size_t initialCommittedSize); + +// Commit additional memory in an existing mapping. `dataEnd` must be the +// correct value for the end of the existing committed area, and `delta` must be +// a byte amount to grow the mapping by, and must be a multiple of the page +// size. Returns false on failure. +bool CommitBufferMemory(void* dataEnd, size_t delta); + +// Extend an existing mapping by adding uncommited pages to it. `dataStart` +// must be the pointer to the start of the existing mapping, `mappedSize` the +// size of the existing mapping, and `newMappedSize` the size of the extended +// mapping (sizes in bytes), with `mappedSize` <= `newMappedSize`. Both sizes +// must be divisible by the page size. Returns false on failure. +bool ExtendBufferMapping(void* dataStart, size_t mappedSize, + size_t newMappedSize); + +// Remove an existing mapping. `dataStart` must be the pointer to the start of +// the mapping, and `mappedSize` the size of that mapping. +void UnmapBufferMemory(void* dataStart, size_t mappedSize); + +// Return the number of currently live mapped buffers. +int32_t LiveMappedBufferCount(); + +// The inheritance hierarchy for the various classes relating to typed arrays +// is as follows. +// +// +// - JSObject +// - TypedObject (declared in wasm/TypedObject.h) +// - NativeObject +// - ArrayBufferObjectMaybeShared +// - ArrayBufferObject +// - SharedArrayBufferObject +// - ArrayBufferViewObject +// - DataViewObject +// - TypedArrayObject (declared in vm/TypedArrayObject.h) +// - TypedArrayObjectTemplate +// - Int8ArrayObject +// - Uint8ArrayObject +// - ... +// +// Note that |TypedArrayObjectTemplate| is just an implementation +// detail that makes implementing its various subclasses easier. 
+// +// ArrayBufferObject and SharedArrayBufferObject are unrelated data types: +// the racy memory of the latter cannot substitute for the non-racy memory of +// the former; the non-racy memory of the former cannot be used with the +// atomics; the former can be detached and the latter not. Hence they have been +// separated completely. +// +// Most APIs will only accept ArrayBufferObject. ArrayBufferObjectMaybeShared +// exists as a join point to allow APIs that can take or use either, notably +// AsmJS. +// +// In contrast with the separation of ArrayBufferObject and +// SharedArrayBufferObject, the TypedArray types can map either. +// +// The possible data ownership and reference relationships with ArrayBuffers +// and related classes are enumerated below. These are the possible locations +// for typed data: +// +// (1) malloc'ed or mmap'ed data owned by an ArrayBufferObject. +// (2) Data allocated inline with an ArrayBufferObject. +// (3) Data allocated inline with a TypedArrayObject. +// (4) Data allocated inline with an InlineTypedObject. +// +// An ArrayBufferObject may point to any of these sources of data, except (3). +// All array buffer views may point to any of these sources of data, except +// that (3) may only be pointed to by the typed array the data is inline with. +// +// During a minor GC, (3) and (4) may move. During a compacting GC, (2), (3), +// and (4) may move. + +class ArrayBufferObjectMaybeShared; + +mozilla::Maybe WasmArrayBufferMaxSize( + const ArrayBufferObjectMaybeShared* buf); +size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf); + +// Class wrapping an ArrayBuffer or ArrayBufferView byte offset or length. +class BufferSize { + size_t size_ = 0; + + public: + explicit BufferSize(size_t size) : size_(size) {} + + size_t get() const { return size_; } + + // For consumers that still need to be audited or changed to support large + // buffers. 
+ uint32_t deprecatedGetUint32() const { + MOZ_ASSERT(size_ <= INT32_MAX); + return size_; + } +}; + +class ArrayBufferObjectMaybeShared : public NativeObject { + public: + inline BufferSize byteLength() const; + inline bool isDetached() const; + inline SharedMem dataPointerEither(); + + // WebAssembly support: + // Note: the eventual goal is to remove this from ArrayBuffer and have + // (Shared)ArrayBuffers alias memory owned by some wasm::Memory object. + + mozilla::Maybe wasmMaxSize() const { + return WasmArrayBufferMaxSize(this); + } + size_t wasmMappedSize() const { return WasmArrayBufferMappedSize(this); } + + inline bool isPreparedForAsmJS() const; + inline bool isWasm() const; +}; + +using RootedArrayBufferObjectMaybeShared = + Rooted; +using HandleArrayBufferObjectMaybeShared = + Handle; +using MutableHandleArrayBufferObjectMaybeShared = + MutableHandle; + +/* + * ArrayBufferObject + * + * This class holds the underlying raw buffer that the various ArrayBufferViews + * (eg DataViewObject, the TypedArrays, TypedObjects) access. It can be created + * explicitly and used to construct an ArrayBufferView, or can be created + * lazily when it is first accessed for a TypedArrayObject or TypedObject that + * doesn't have an explicit buffer. + * + * ArrayBufferObject (or really the underlying memory) /is not racy/: the + * memory is private to a single worker. 
+ */ +class ArrayBufferObject : public ArrayBufferObjectMaybeShared { + static bool byteLengthGetterImpl(JSContext* cx, const CallArgs& args); + static bool fun_slice_impl(JSContext* cx, const CallArgs& args); + + public: + static const uint8_t DATA_SLOT = 0; + static const uint8_t BYTE_LENGTH_SLOT = 1; + static const uint8_t FIRST_VIEW_SLOT = 2; + static const uint8_t FLAGS_SLOT = 3; + + static const uint8_t RESERVED_SLOTS = 4; + + static const size_t ARRAY_BUFFER_ALIGNMENT = 8; + + static_assert(FLAGS_SLOT == JS_ARRAYBUFFER_FLAGS_SLOT, + "self-hosted code with burned-in constants must get the " + "right flags slot"); + + static bool supportLargeBuffers; + + // The length of an ArrayBuffer or SharedArrayBuffer can be at most + // INT32_MAX. Allow a larger limit on 64-bit platforms if the experimental + // large-buffers flag is used. + static size_t maxBufferByteLength() { +#ifdef JS_64BIT + if (supportLargeBuffers) { + return size_t(8) * 1024 * 1024 * 1024; // 8 GB. + } +#endif + return INT32_MAX; + } + + /** The largest number of bytes that can be stored inline. */ + static constexpr size_t MaxInlineBytes = + (NativeObject::MAX_FIXED_SLOTS - RESERVED_SLOTS) * sizeof(JS::Value); + + public: + enum OwnsState { + DoesntOwnData = 0, + OwnsData = 1, + }; + + enum BufferKind { + /** Inline data kept in the repurposed slots of this ArrayBufferObject. */ + INLINE_DATA = 0b000, + + /* Data allocated using the SpiderMonkey allocator. */ + MALLOCED = 0b001, + + /** + * No bytes are associated with this buffer. (This could be because the + * buffer is detached, because it's an internal, newborn buffer not yet + * overwritten with user-exposable semantics, or some other reason. The + * point is, don't read precise language semantics into this kind.) + */ + NO_DATA = 0b010, + + /** + * User-owned memory. The associated buffer must be manually detached + * before the user invalidates (deallocates, reuses the storage of, &c.) + * the user-owned memory. 
+ */ + USER_OWNED = 0b011, + + WASM = 0b100, + MAPPED = 0b101, + EXTERNAL = 0b110, + + // These kind-values are currently invalid. We intend to expand valid + // BufferKinds in the future to either partly or fully use these values. + BAD1 = 0b111, + + KIND_MASK = 0b111 + }; + + protected: + enum ArrayBufferFlags { + // The flags also store the BufferKind + BUFFER_KIND_MASK = BufferKind::KIND_MASK, + + DETACHED = 0b1000, + + // Views of this buffer include only typed objects. + TYPED_OBJECT_VIEWS = 0b1'0000, + + // This MALLOCED, MAPPED, or EXTERNAL buffer has been prepared for asm.js + // and cannot henceforth be transferred/detached. (WASM, USER_OWNED, and + // INLINE_DATA buffers can't be prepared for asm.js -- although if an + // INLINE_DATA buffer is used with asm.js, it's silently rewritten into a + // MALLOCED buffer which *can* be prepared.) + FOR_ASMJS = 0b10'0000, + }; + + static_assert(JS_ARRAYBUFFER_DETACHED_FLAG == DETACHED, + "self-hosted code with burned-in constants must use the " + "correct DETACHED bit value"); + + enum class FillContents { Zero, Uninitialized }; + + template + static std::tuple createBufferAndData( + JSContext* cx, BufferSize nbytes, AutoSetNewObjectMetadata&, + JS::Handle proto = nullptr); + + public: + class BufferContents { + uint8_t* data_; + BufferKind kind_; + JS::BufferContentsFreeFunc free_; + void* freeUserData_; + + friend class ArrayBufferObject; + + BufferContents(uint8_t* data, BufferKind kind, + JS::BufferContentsFreeFunc freeFunc = nullptr, + void* freeUserData = nullptr) + : data_(data), + kind_(kind), + free_(freeFunc), + freeUserData_(freeUserData) { + MOZ_ASSERT((kind_ & ~KIND_MASK) == 0); + MOZ_ASSERT_IF(free_ || freeUserData_, kind_ == EXTERNAL); + + // It is the caller's responsibility to ensure that the + // BufferContents does not outlive the data. 
+ } + + public: + static BufferContents createInlineData(void* data) { + return BufferContents(static_cast(data), INLINE_DATA); + } + + static BufferContents createMalloced(void* data) { + return BufferContents(static_cast(data), MALLOCED); + } + + static BufferContents createNoData() { + return BufferContents(nullptr, NO_DATA); + } + + static BufferContents createUserOwned(void* data) { + return BufferContents(static_cast(data), USER_OWNED); + } + + static BufferContents createWasm(void* data) { + return BufferContents(static_cast(data), WASM); + } + + static BufferContents createMapped(void* data) { + return BufferContents(static_cast(data), MAPPED); + } + + static BufferContents createExternal(void* data, + JS::BufferContentsFreeFunc freeFunc, + void* freeUserData = nullptr) { + return BufferContents(static_cast(data), EXTERNAL, freeFunc, + freeUserData); + } + + static BufferContents createFailed() { + // There's no harm in tagging this as MALLOCED, even tho obviously it + // isn't. And adding an extra tag purely for this case is a complication + // that presently appears avoidable. 
+ return BufferContents(nullptr, MALLOCED); + } + + uint8_t* data() const { return data_; } + BufferKind kind() const { return kind_; } + JS::BufferContentsFreeFunc freeFunc() const { return free_; } + void* freeUserData() const { return freeUserData_; } + + explicit operator bool() const { return data_ != nullptr; } + WasmArrayRawBuffer* wasmBuffer() const; + }; + + static const JSClass class_; + static const JSClass protoClass_; + + static bool byteLengthGetter(JSContext* cx, unsigned argc, Value* vp); + + static bool fun_slice(JSContext* cx, unsigned argc, Value* vp); + + static bool fun_isView(JSContext* cx, unsigned argc, Value* vp); + + static bool fun_species(JSContext* cx, unsigned argc, Value* vp); + + static bool class_constructor(JSContext* cx, unsigned argc, Value* vp); + + static ArrayBufferObject* createForContents(JSContext* cx, BufferSize nbytes, + BufferContents contents); + + static ArrayBufferObject* copy( + JSContext* cx, JS::Handle unwrappedArrayBuffer); + + static ArrayBufferObject* createZeroed(JSContext* cx, BufferSize nbytes, + HandleObject proto = nullptr); + + static ArrayBufferObject* createForTypedObject(JSContext* cx, + BufferSize nbytes); + + // Create an ArrayBufferObject that is safely finalizable and can later be + // initialize()d to become a real, content-visible ArrayBufferObject. + static ArrayBufferObject* createEmpty(JSContext* cx); + + // Create an ArrayBufferObject using the provided buffer and size. Assumes + // ownership of |buffer| even in case of failure, i.e. on failure |buffer| + // is deallocated. 
+ static ArrayBufferObject* createFromNewRawBuffer(JSContext* cx, + WasmArrayRawBuffer* buffer, + BufferSize initialSize); + + static void copyData(Handle toBuffer, size_t toIndex, + Handle fromBuffer, size_t fromIndex, + size_t count); + + static size_t objectMoved(JSObject* obj, JSObject* old); + + static uint8_t* stealMallocedContents(JSContext* cx, + Handle buffer); + + static BufferContents extractStructuredCloneContents( + JSContext* cx, Handle buffer); + + static void addSizeOfExcludingThis(JSObject* obj, + mozilla::MallocSizeOf mallocSizeOf, + JS::ClassInfo* info); + + // ArrayBufferObjects (strongly) store the first view added to them, while + // later views are (weakly) stored in the compartment's InnerViewTable + // below. Buffers usually only have one view, so this slot optimizes for + // the common case. Avoiding entries in the InnerViewTable saves memory and + // non-incrementalized sweep time. + JSObject* firstView(); + + bool addView(JSContext* cx, ArrayBufferViewObject* view); + + // Detach this buffer from its original memory. (This necessarily makes + // views of this buffer unusable for modifying that original memory.) 
+ static void detach(JSContext* cx, Handle buffer); + + static constexpr size_t offsetOfByteLengthSlot() { + return getFixedSlotOffset(BYTE_LENGTH_SLOT); + } + + private: + void setFirstView(ArrayBufferViewObject* view); + + uint8_t* inlineDataPointer() const; + + struct FreeInfo { + JS::BufferContentsFreeFunc freeFunc; + void* freeUserData; + }; + FreeInfo* freeInfo() const; + + public: + uint8_t* dataPointer() const; + SharedMem dataPointerShared() const; + BufferSize byteLength() const; + + BufferContents contents() const { + if (isExternal()) { + return BufferContents(dataPointer(), EXTERNAL, freeInfo()->freeFunc, + freeInfo()->freeUserData); + } + return BufferContents(dataPointer(), bufferKind()); + } + bool hasInlineData() const { return dataPointer() == inlineDataPointer(); } + + void releaseData(JSFreeOp* fop); + + BufferKind bufferKind() const { + return BufferKind(flags() & BUFFER_KIND_MASK); + } + + bool isInlineData() const { return bufferKind() == INLINE_DATA; } + bool isMalloced() const { return bufferKind() == MALLOCED; } + bool isNoData() const { return bufferKind() == NO_DATA; } + bool hasUserOwnedData() const { return bufferKind() == USER_OWNED; } + + bool isWasm() const { return bufferKind() == WASM; } + bool isMapped() const { return bufferKind() == MAPPED; } + bool isExternal() const { return bufferKind() == EXTERNAL; } + + bool isDetached() const { return flags() & DETACHED; } + bool isPreparedForAsmJS() const { return flags() & FOR_ASMJS; } + + // WebAssembly support: + + /** + * Prepare this ArrayBuffer for use with asm.js. Returns true on success, + * false on failure. This function reports no errors. 
+ */ + MOZ_MUST_USE bool prepareForAsmJS(); + + size_t wasmMappedSize() const; + mozilla::Maybe wasmMaxSize() const; + static MOZ_MUST_USE bool wasmGrowToSizeInPlace( + BufferSize newSize, Handle oldBuf, + MutableHandle newBuf, JSContext* cx); + static MOZ_MUST_USE bool wasmMovingGrowToSize( + BufferSize newSize, Handle oldBuf, + MutableHandle newBuf, JSContext* cx); + + static void finalize(JSFreeOp* fop, JSObject* obj); + + static BufferContents createMappedContents(int fd, size_t offset, + size_t length); + + bool hasTypedObjectViews() const { return flags() & TYPED_OBJECT_VIEWS; } + + protected: + void setDataPointer(BufferContents contents); + void setByteLength(BufferSize length); + + size_t associatedBytes() const; + + uint32_t flags() const; + void setFlags(uint32_t flags); + + void setHasTypedObjectViews() { setFlags(flags() | TYPED_OBJECT_VIEWS); } + + void setIsDetached() { setFlags(flags() | DETACHED); } + void setIsPreparedForAsmJS() { + MOZ_ASSERT(!isWasm()); + MOZ_ASSERT(!hasUserOwnedData()); + MOZ_ASSERT(!isInlineData()); + MOZ_ASSERT(isMalloced() || isMapped() || isExternal()); + setFlags(flags() | FOR_ASMJS); + } + + void initialize(BufferSize byteLength, BufferContents contents) { + setByteLength(byteLength); + setFlags(0); + setFirstView(nullptr); + setDataPointer(contents); + } + + void* initializeToInlineData(size_t byteLength) { + void* data = inlineDataPointer(); + initialize(BufferSize(byteLength), BufferContents::createInlineData(data)); + return data; + } +}; + +using RootedArrayBufferObject = Rooted; +using HandleArrayBufferObject = Handle; +using MutableHandleArrayBufferObject = MutableHandle; + +bool CreateWasmBuffer(JSContext* cx, wasm::MemoryKind memKind, + const wasm::Limits& memory, + MutableHandleArrayBufferObjectMaybeShared buffer); + +/* + * Tests for ArrayBufferObject, like obj->is(). 
+ */ +bool IsArrayBuffer(HandleValue v); +bool IsArrayBuffer(JSObject* obj); +ArrayBufferObject& AsArrayBuffer(JSObject* obj); + +/* + * Ditto for ArrayBufferObjectMaybeShared. + */ +bool IsArrayBufferMaybeShared(HandleValue v); +bool IsArrayBufferMaybeShared(JSObject* obj); +ArrayBufferObjectMaybeShared& AsArrayBufferMaybeShared(JSObject* obj); + +// Per-compartment table that manages the relationship between array buffers +// and the views that use their storage. +class InnerViewTable { + public: + typedef Vector ViewVector; + + friend class ArrayBufferObject; + + private: + struct MapGCPolicy { + static bool needsSweep(JSObject** key, ViewVector* value) { + return InnerViewTable::sweepEntry(key, *value); + } + }; + + // This key is a raw pointer and not a WeakHeapPtr because the post-barrier + // would hold nursery-allocated entries live unconditionally. It is a very + // common pattern in low-level and performance-oriented JavaScript to create + // hundreds or thousands of very short lived temporary views on a larger + // buffer; having to tenured all of these would be a catastrophic performance + // regression. Thus, it is vital that nursery pointers in this map not be held + // live. Special support is required in the minor GC, implemented in + // sweepAfterMinorGC. + using Map = GCHashMap, + ZoneAllocPolicy, MapGCPolicy>; + + // For all objects sharing their storage with some other view, this maps + // the object to the list of such views. All entries in this map are weak. + Map map; + + // List of keys from innerViews where either the source or at least one + // target is in the nursery. The raw pointer to a JSObject is allowed here + // because this vector is cleared after every minor collection. Users in + // sweepAfterMinorCollection must be careful to use MaybeForwarded before + // touching these pointers. + Vector nurseryKeys; + + // Whether nurseryKeys is a complete list. 
+ bool nurseryKeysValid; + + // Sweep an entry during GC, returning whether the entry should be removed. + static bool sweepEntry(JSObject** pkey, ViewVector& views); + + bool addView(JSContext* cx, ArrayBufferObject* buffer, JSObject* view); + ViewVector* maybeViewsUnbarriered(ArrayBufferObject* obj); + void removeViews(ArrayBufferObject* obj); + + public: + explicit InnerViewTable(Zone* zone) : map(zone), nurseryKeysValid(true) {} + + // Remove references to dead objects in the table and update table entries + // to reflect moved objects. + void sweep(); + void sweepAfterMinorGC(); + + bool needsSweep() const { return map.needsSweep(); } + + bool needsSweepAfterMinorGC() const { + return !nurseryKeys.empty() || !nurseryKeysValid; + } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf); +}; + +template +class MutableWrappedPtrOperations + : public WrappedPtrOperations { + InnerViewTable& table() { return static_cast(this)->get(); } + + public: + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) { + return table().sizeOfExcludingThis(mallocSizeOf); + } +}; + +class WasmArrayRawBuffer { + mozilla::Maybe maxSize_; + size_t mappedSize_; // Not including the header page + BufferSize length_; + + protected: + WasmArrayRawBuffer(uint8_t* buffer, const mozilla::Maybe& maxSize, + size_t mappedSize, BufferSize length) + : maxSize_(maxSize), mappedSize_(mappedSize), length_(length) { + MOZ_ASSERT(buffer == dataPointer()); + } + + public: + static WasmArrayRawBuffer* Allocate(BufferSize numBytes, + const mozilla::Maybe& maxSize, + const mozilla::Maybe& mappedSize); + static void Release(void* mem); + + uint8_t* dataPointer() { + uint8_t* ptr = reinterpret_cast(this); + return ptr + sizeof(WasmArrayRawBuffer); + } + + static const WasmArrayRawBuffer* fromDataPtr(const uint8_t* dataPtr) { + return reinterpret_cast( + dataPtr - sizeof(WasmArrayRawBuffer)); + } + + uint8_t* basePointer() { return dataPointer() - gc::SystemPageSize(); } + + size_t 
mappedSize() const { return mappedSize_; } + + mozilla::Maybe maxSize() const { return maxSize_; } + + BufferSize byteLength() const { return length_; } + + MOZ_MUST_USE bool growToSizeInPlace(BufferSize oldSize, BufferSize newSize); + + MOZ_MUST_USE bool extendMappedSize(uint64_t maxSize); + + // Try and grow the mapped region of memory. Does not change current size. + // Does not move memory if no space to grow. + void tryGrowMaxSizeInPlace(uint64_t deltaMaxSize); +}; + +} // namespace js + +template <> +bool JSObject::is() const; + +#endif // vm_ArrayBufferObject_h diff --git a/js/src/vm/ArrayBufferObjectMaybeShared.cpp b/js/src/vm/ArrayBufferObjectMaybeShared.cpp new file mode 100644 index 0000000000..f5e544255a --- /dev/null +++ b/js/src/vm/ArrayBufferObjectMaybeShared.cpp @@ -0,0 +1,60 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "mozilla/Assertions.h" // MOZ_ASSERT + +#include // uint8_t, uint32_t + +#include "jstypes.h" // JS_PUBLIC_API + +#include "js/ArrayBufferMaybeShared.h" +#include "vm/ArrayBufferObject.h" // js::ArrayBufferObject +#include "vm/JSObject.h" // JSObject +#include "vm/SharedArrayObject.h" // js::SharedArrayBufferObject +#include "vm/SharedMem.h" // SharedMem + +using namespace js; + +JS_PUBLIC_API bool JS::IsArrayBufferObjectMaybeShared(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_PUBLIC_API JSObject* JS::UnwrapArrayBufferMaybeShared(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_PUBLIC_API void JS::GetArrayBufferMaybeSharedLengthAndData( + JSObject* obj, uint32_t* length, bool* isSharedMemory, uint8_t** data) { + MOZ_ASSERT(obj->is()); + + if (obj->is()) { + auto* buffer = &obj->as(); + *length = buffer->byteLength().deprecatedGetUint32(); + *data = buffer->dataPointerShared().unwrap(); + *isSharedMemory = true; + } else { + auto* buffer = &obj->as(); + *length = buffer->byteLength().deprecatedGetUint32(); + *data = buffer->dataPointer(); + *isSharedMemory = false; + } +} + +JS_PUBLIC_API uint8_t* JS::GetArrayBufferMaybeSharedData( + JSObject* obj, bool* isSharedMemory, const JS::AutoRequireNoGC&) { + MOZ_ASSERT(obj->maybeUnwrapIf()); + + if (ArrayBufferObject* aobj = obj->maybeUnwrapIf()) { + *isSharedMemory = false; + return aobj->dataPointer(); + } else if (SharedArrayBufferObject* saobj = + obj->maybeUnwrapIf()) { + *isSharedMemory = true; + return saobj->dataPointerShared().unwrap(); + } + + return nullptr; +} diff --git a/js/src/vm/ArrayBufferViewObject.cpp b/js/src/vm/ArrayBufferViewObject.cpp new file mode 100644 index 0000000000..07ce5c207a --- /dev/null +++ b/js/src/vm/ArrayBufferViewObject.cpp @@ -0,0 +1,298 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/ArrayBufferViewObject.h" + +#include "builtin/DataViewObject.h" +#include "gc/Nursery.h" +#include "js/experimental/TypedData.h" // JS_GetArrayBufferView{Data,Buffer,Length,ByteOffset}, JS_GetObjectAsArrayBufferView, JS_IsArrayBufferViewObject +#include "js/SharedArrayBuffer.h" +#include "vm/JSContext.h" +#include "vm/TypedArrayObject.h" + +#include "gc/Nursery-inl.h" +#include "vm/ArrayBufferObject-inl.h" +#include "vm/NativeObject-inl.h" + +using namespace js; + +/* + * This method is used to trace TypedArrayObjects and DataViewObjects. We need + * a custom tracer to move the object's data pointer if its owner was moved and + * stores its data inline. + */ +/* static */ +void ArrayBufferViewObject::trace(JSTracer* trc, JSObject* objArg) { + ArrayBufferViewObject* obj = &objArg->as(); + HeapSlot& bufSlot = obj->getFixedSlotRef(BUFFER_SLOT); + TraceEdge(trc, &bufSlot, "ArrayBufferViewObject.buffer"); + + // Update obj's data pointer if it moved. + if (bufSlot.isObject()) { + if (gc::MaybeForwardedObjectIs(&bufSlot.toObject())) { + ArrayBufferObject& buf = + gc::MaybeForwardedObjectAs(&bufSlot.toObject()); + BufferSize offset = obj->byteOffset(); + + MOZ_ASSERT_IF(buf.dataPointer() == nullptr, offset.get() == 0); + + // The data may or may not be inline with the buffer. The buffer + // can only move during a compacting GC, in which case its + // objectMoved hook has already updated the buffer's data pointer. 
+ size_t nfixed = obj->numFixedSlotsMaybeForwarded(); + obj->setPrivateUnbarriered(nfixed, buf.dataPointer() + offset.get()); + } + } +} + +template <> +bool JSObject::is() const { + return is() || is(); +} + +void ArrayBufferViewObject::notifyBufferDetached() { + MOZ_ASSERT(!isSharedMemory()); + MOZ_ASSERT(hasBuffer()); + + setFixedSlot(LENGTH_SLOT, PrivateValue(size_t(0))); + setFixedSlot(BYTEOFFSET_SLOT, PrivateValue(size_t(0))); + + setPrivate(nullptr); +} + +/* static */ +ArrayBufferObjectMaybeShared* ArrayBufferViewObject::bufferObject( + JSContext* cx, Handle thisObject) { + if (thisObject->is()) { + Rooted typedArray(cx, + &thisObject->as()); + if (!TypedArrayObject::ensureHasBuffer(cx, typedArray)) { + return nullptr; + } + } + return thisObject->bufferEither(); +} + +bool ArrayBufferViewObject::init(JSContext* cx, + ArrayBufferObjectMaybeShared* buffer, + BufferSize byteOffset, BufferSize length, + uint32_t bytesPerElement) { + MOZ_ASSERT_IF(!buffer, byteOffset.get() == 0); + MOZ_ASSERT_IF(buffer, !buffer->isDetached()); + + MOZ_ASSERT(byteOffset.get() <= ArrayBufferObject::maxBufferByteLength()); + MOZ_ASSERT(length.get() <= ArrayBufferObject::maxBufferByteLength()); + MOZ_ASSERT(byteOffset.get() + length.get() <= + ArrayBufferObject::maxBufferByteLength()); + + MOZ_ASSERT_IF( + is(), + length.get() < TypedArrayObject::maxByteLength() / bytesPerElement); + + // The isSharedMemory property is invariant. Self-hosting code that + // sets BUFFER_SLOT or the private slot (if it does) must maintain it by + // always setting those to reference shared memory. 
+ if (buffer && buffer->is()) { + setIsSharedMemory(); + } + + initFixedSlot(BYTEOFFSET_SLOT, PrivateValue(byteOffset.get())); + initFixedSlot(LENGTH_SLOT, PrivateValue(length.get())); + initFixedSlot(BUFFER_SLOT, ObjectOrNullValue(buffer)); + + if (buffer) { + SharedMem ptr = buffer->dataPointerEither(); + initDataPointer(ptr + byteOffset.get()); + + // Only ArrayBuffers used for inline typed objects can have + // nursery-allocated data and we shouldn't see such buffers here. + MOZ_ASSERT_IF(buffer->byteLength().get() > 0, !cx->nursery().isInside(ptr)); + } else { + MOZ_ASSERT(is()); + MOZ_ASSERT(length.get() * bytesPerElement <= + TypedArrayObject::INLINE_BUFFER_LIMIT); + void* data = fixedData(TypedArrayObject::FIXED_DATA_START); + initPrivate(data); + memset(data, 0, length.get() * bytesPerElement); +#ifdef DEBUG + if (length.get() == 0) { + uint8_t* elements = static_cast(data); + elements[0] = ZeroLengthArrayData; + } +#endif + } + +#ifdef DEBUG + if (buffer) { + size_t viewByteLength = length.get() * bytesPerElement; + size_t viewByteOffset = byteOffset.get(); + size_t bufferByteLength = buffer->byteLength().get(); + // Unwraps are safe: both are for the pointer value. + MOZ_ASSERT_IF(IsArrayBuffer(buffer), + buffer->dataPointerEither().unwrap(/*safe*/) <= + dataPointerEither().unwrap(/*safe*/)); + MOZ_ASSERT(bufferByteLength - viewByteOffset >= viewByteLength); + MOZ_ASSERT(viewByteOffset <= bufferByteLength); + } + + // Verify that the private slot is at the expected place. + MOZ_ASSERT(numFixedSlots() == DATA_SLOT); +#endif + + // ArrayBufferObjects track their views to support detaching. 
+ if (buffer && buffer->is()) { + if (!buffer->as().addView(cx, this)) { + return false; + } + } + + return true; +} + +/* JS Friend API */ + +JS_FRIEND_API bool JS_IsArrayBufferViewObject(JSObject* obj) { + return obj->canUnwrapAs(); +} + +JS_FRIEND_API JSObject* js::UnwrapArrayBufferView(JSObject* obj) { + return obj->maybeUnwrapIf(); +} + +JS_FRIEND_API void* JS_GetArrayBufferViewData(JSObject* obj, + bool* isSharedMemory, + const JS::AutoRequireNoGC&) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return nullptr; + } + + *isSharedMemory = view->isSharedMemory(); + return view->dataPointerEither().unwrap( + /*safe - caller sees isSharedMemory flag*/); +} + +JS_FRIEND_API uint8_t* JS_GetArrayBufferViewFixedData(JSObject* obj, + uint8_t* buffer, + size_t bufSize) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return nullptr; + } + + // Disallow shared memory until it is needed. + if (view->isSharedMemory()) { + return nullptr; + } + + // TypedArrays (but not DataViews) can have inline data, in which case we + // need to copy into the given buffer. + if (view->is()) { + TypedArrayObject* ta = &view->as(); + if (ta->hasInlineElements()) { + size_t bytes = ta->byteLength().get(); + if (bytes > bufSize) { + return nullptr; // Does not fit. 
+ } + memcpy(buffer, view->dataPointerUnshared(), bytes); + return buffer; + } + } + + return static_cast(view->dataPointerUnshared()); +} + +JS_FRIEND_API JSObject* JS_GetArrayBufferViewBuffer(JSContext* cx, + HandleObject obj, + bool* isSharedMemory) { + AssertHeapIsIdle(); + CHECK_THREAD(cx); + cx->check(obj); + + Rooted unwrappedView( + cx, obj->maybeUnwrapAs()); + if (!unwrappedView) { + ReportAccessDenied(cx); + return nullptr; + } + + ArrayBufferObjectMaybeShared* unwrappedBuffer; + { + AutoRealm ar(cx, unwrappedView); + unwrappedBuffer = ArrayBufferViewObject::bufferObject(cx, unwrappedView); + if (!unwrappedBuffer) { + return nullptr; + } + } + *isSharedMemory = unwrappedBuffer->is(); + + RootedObject buffer(cx, unwrappedBuffer); + if (!cx->compartment()->wrap(cx, &buffer)) { + return nullptr; + } + + return buffer; +} + +JS_FRIEND_API uint32_t JS_GetArrayBufferViewByteLength(JSObject* obj) { + obj = obj->maybeUnwrapAs(); + if (!obj) { + return 0; + } + BufferSize length = obj->is() + ? obj->as().byteLength() + : obj->as().byteLength(); + return length.deprecatedGetUint32(); +} + +JS_FRIEND_API uint32_t JS_GetArrayBufferViewByteOffset(JSObject* obj) { + obj = obj->maybeUnwrapAs(); + if (!obj) { + return 0; + } + BufferSize offset = obj->is() + ? obj->as().byteOffset() + : obj->as().byteOffset(); + return offset.deprecatedGetUint32(); +} + +JS_FRIEND_API JSObject* JS_GetObjectAsArrayBufferView(JSObject* obj, + uint32_t* length, + bool* isSharedMemory, + uint8_t** data) { + obj = obj->maybeUnwrapIf(); + if (!obj) { + return nullptr; + } + + js::GetArrayBufferViewLengthAndData(obj, length, isSharedMemory, data); + return obj; +} + +JS_FRIEND_API void js::GetArrayBufferViewLengthAndData(JSObject* obj, + uint32_t* length, + bool* isSharedMemory, + uint8_t** data) { + MOZ_ASSERT(obj->is()); + + BufferSize byteLength = obj->is() + ? 
obj->as().byteLength() + : obj->as().byteLength(); + *length = byteLength.deprecatedGetUint32(); + + ArrayBufferViewObject& view = obj->as(); + *isSharedMemory = view.isSharedMemory(); + *data = static_cast( + view.dataPointerEither().unwrap(/*safe - caller sees isShared flag*/)); +} + +JS_PUBLIC_API bool JS::IsArrayBufferViewShared(JSObject* obj) { + ArrayBufferViewObject* view = obj->maybeUnwrapAs(); + if (!view) { + return false; + } + return view->isSharedMemory(); +} diff --git a/js/src/vm/ArrayBufferViewObject.h b/js/src/vm/ArrayBufferViewObject.h new file mode 100644 index 0000000000..22439caee9 --- /dev/null +++ b/js/src/vm/ArrayBufferViewObject.h @@ -0,0 +1,165 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayBufferViewObject_h +#define vm_ArrayBufferViewObject_h + +#include "builtin/TypedArrayConstants.h" +#include "vm/ArrayBufferObject.h" +#include "vm/NativeObject.h" +#include "vm/SharedArrayObject.h" +#include "vm/SharedMem.h" + +namespace js { + +/* + * ArrayBufferViewObject + * + * Common base class for all array buffer views (DataViewObject and + * TypedArrayObject). + */ + +class ArrayBufferViewObject : public NativeObject { + public: + // Underlying (Shared)ArrayBufferObject. + static constexpr size_t BUFFER_SLOT = 0; + static_assert(BUFFER_SLOT == JS_TYPEDARRAYLAYOUT_BUFFER_SLOT, + "self-hosted code with burned-in constants must get the " + "right buffer slot"); + + // Slot containing length of the view in number of typed elements. + static constexpr size_t LENGTH_SLOT = 1; + + // Offset of view within underlying (Shared)ArrayBufferObject. 
+ static constexpr size_t BYTEOFFSET_SLOT = 2; + + static constexpr size_t RESERVED_SLOTS = 3; + +#ifdef DEBUG + static const uint8_t ZeroLengthArrayData = 0x4A; +#endif + + // The raw pointer to the buffer memory, the "private" value. + // + // This offset is exposed for performance reasons - so that it + // need not be looked up on accesses. + static constexpr size_t DATA_SLOT = 3; + + static constexpr int lengthOffset() { + return NativeObject::getFixedSlotOffset(LENGTH_SLOT); + } + static constexpr int byteOffsetOffset() { + return NativeObject::getFixedSlotOffset(BYTEOFFSET_SLOT); + } + static constexpr int dataOffset() { + return NativeObject::getPrivateDataOffset(DATA_SLOT); + } + + private: + void* dataPointerEither_() const { + // Note, do not check whether shared or not + // Keep synced with js::GetArrayLengthAndData in jsfriendapi.h! + return static_cast(getPrivate(DATA_SLOT)); + } + + public: + MOZ_MUST_USE bool init(JSContext* cx, ArrayBufferObjectMaybeShared* buffer, + BufferSize byteOffset, BufferSize length, + uint32_t bytesPerElement); + + static ArrayBufferObjectMaybeShared* bufferObject( + JSContext* cx, Handle obj); + + void notifyBufferDetached(); + + void initDataPointer(SharedMem viewData) { + // Install a pointer to the buffer location that corresponds + // to offset zero within the typed array. + // + // The following unwrap is safe because the DATA_SLOT is + // accessed only from jitted code and from the + // dataPointerEither_() accessor above; in neither case does the + // raw pointer escape untagged into C++ code. 
+ initPrivate(viewData.unwrap(/*safe - see above*/)); + } + + SharedMem dataPointerShared() const { + return SharedMem::shared(dataPointerEither_()); + } + SharedMem dataPointerEither() const { + if (isSharedMemory()) { + return SharedMem::shared(dataPointerEither_()); + } + return SharedMem::unshared(dataPointerEither_()); + } + void* dataPointerUnshared() const { + MOZ_ASSERT(!isSharedMemory()); + return dataPointerEither_(); + } + + Value bufferValue() const { return getFixedSlot(BUFFER_SLOT); } + bool hasBuffer() const { return bufferValue().isObject(); } + + ArrayBufferObject* bufferUnshared() const { + MOZ_ASSERT(!isSharedMemory()); + ArrayBufferObjectMaybeShared* obj = bufferEither(); + if (!obj) { + return nullptr; + } + return &obj->as(); + } + SharedArrayBufferObject* bufferShared() const { + MOZ_ASSERT(isSharedMemory()); + ArrayBufferObjectMaybeShared* obj = bufferEither(); + if (!obj) { + return nullptr; + } + return &obj->as(); + } + ArrayBufferObjectMaybeShared* bufferEither() const { + JSObject* obj = bufferValue().toObjectOrNull(); + if (!obj) { + return nullptr; + } + MOZ_ASSERT(isSharedMemory() ? obj->is() + : obj->is()); + return &obj->as(); + } + + bool hasDetachedBuffer() const { + // Shared buffers can't be detached. + if (isSharedMemory()) { + return false; + } + + // A typed array with a null buffer has never had its buffer exposed to + // become detached. 
+ ArrayBufferObject* buffer = bufferUnshared(); + if (!buffer) { + return false; + } + + return buffer->isDetached(); + } + + BufferSize byteOffset() const { + return BufferSize(size_t(getFixedSlot(BYTEOFFSET_SLOT).toPrivate())); + } + + Value byteOffsetValue() const { + size_t offset = byteOffset().get(); + return NumberValue(offset); + } + + static void trace(JSTracer* trc, JSObject* obj); +}; + +} // namespace js + +template <> +bool JSObject::is() const; + +#endif // vm_ArrayBufferViewObject_h diff --git a/js/src/vm/ArrayObject-inl.h b/js/src/vm/ArrayObject-inl.h new file mode 100644 index 0000000000..193679fa9b --- /dev/null +++ b/js/src/vm/ArrayObject-inl.h @@ -0,0 +1,87 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_ArrayObject_inl_h +#define vm_ArrayObject_inl_h + +#include "vm/ArrayObject.h" + +#include "gc/Allocator.h" +#include "gc/GCProbes.h" +#include "vm/StringType.h" + +#include "vm/JSObject-inl.h" +#include "vm/ObjectOperations-inl.h" // js::GetElement + +namespace js { + +/* static */ inline ArrayObject* ArrayObject::createArrayInternal( + JSContext* cx, gc::AllocKind kind, gc::InitialHeap heap, HandleShape shape, + HandleObjectGroup group, AutoSetNewObjectMetadata&) { + const JSClass* clasp = group->clasp(); + MOZ_ASSERT(shape && group); + MOZ_ASSERT(clasp == shape->getObjectClass()); + MOZ_ASSERT(clasp == &ArrayObject::class_); + MOZ_ASSERT_IF(clasp->hasFinalize(), heap == gc::TenuredHeap); + + // Arrays can use their fixed slots to store elements, so can't have shapes + // which allow named properties to be stored in the fixed slots. 
+ MOZ_ASSERT(shape->numFixedSlots() == 0); + + size_t nDynamicSlots = calculateDynamicSlots(0, shape->slotSpan(), clasp); + JSObject* obj = js::AllocateObject(cx, kind, nDynamicSlots, heap, clasp); + if (!obj) { + return nullptr; + } + + ArrayObject* aobj = static_cast(obj); + aobj->initGroup(group); + aobj->initShape(shape); + // NOTE: Dynamic slots are created internally by Allocate. + if (!nDynamicSlots) { + aobj->initEmptyDynamicSlots(); + } + + MOZ_ASSERT(clasp->shouldDelayMetadataBuilder()); + cx->realm()->setObjectPendingMetadata(cx, aobj); + + return aobj; +} + +/* static */ inline ArrayObject* ArrayObject::finishCreateArray( + ArrayObject* obj, HandleShape shape, AutoSetNewObjectMetadata& metadata) { + size_t span = shape->slotSpan(); + if (span) { + obj->initializeSlotRange(0, span); + } + + gc::gcprobes::CreateObject(obj); + + return obj; +} + +/* static */ inline ArrayObject* ArrayObject::createArray( + JSContext* cx, gc::AllocKind kind, gc::InitialHeap heap, HandleShape shape, + HandleObjectGroup group, uint32_t length, + AutoSetNewObjectMetadata& metadata) { + ArrayObject* obj = + createArrayInternal(cx, kind, heap, shape, group, metadata); + if (!obj) { + return nullptr; + } + + uint32_t capacity = + gc::GetGCKindSlots(kind) - ObjectElements::VALUES_PER_HEADER; + + obj->setFixedElements(); + new (obj->getElementsHeader()) ObjectElements(capacity, length); + + return finishCreateArray(obj, shape, metadata); +} + +} // namespace js + +#endif // vm_ArrayObject_inl_h diff --git a/js/src/vm/ArrayObject.h b/js/src/vm/ArrayObject.h new file mode 100644 index 0000000000..bdac075e65 --- /dev/null +++ b/js/src/vm/ArrayObject.h @@ -0,0 +1,64 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_ArrayObject_h +#define vm_ArrayObject_h + +#include "vm/NativeObject.h" + +namespace js { + +class AutoSetNewObjectMetadata; + +class ArrayObject : public NativeObject { + public: + // Array(x) eagerly allocates dense elements if x <= this value. Without + // the subtraction the max would roll over to the next power-of-two (4096) + // due to the way that growElements() and goodAllocated() work. + static const uint32_t EagerAllocationMaxLength = + 2048 - ObjectElements::VALUES_PER_HEADER; + + static const JSClass class_; + + bool lengthIsWritable() const { + return !getElementsHeader()->hasNonwritableArrayLength(); + } + + uint32_t length() const { return getElementsHeader()->length; } + + void setNonWritableLength(JSContext* cx) { + shrinkCapacityToInitializedLength(cx); + getElementsHeader()->setNonwritableArrayLength(); + } + + void setLength(uint32_t length) { + MOZ_ASSERT(lengthIsWritable()); + MOZ_ASSERT_IF(length != getElementsHeader()->length, + !denseElementsAreFrozen()); + getElementsHeader()->length = length; + } + + // Make an array object with the specified initial state. + static inline ArrayObject* createArray(JSContext* cx, gc::AllocKind kind, + gc::InitialHeap heap, + HandleShape shape, + HandleObjectGroup group, + uint32_t length, + AutoSetNewObjectMetadata& metadata); + + private: + // Helper for the above methods. 
+ static inline ArrayObject* createArrayInternal( + JSContext* cx, gc::AllocKind kind, gc::InitialHeap heap, + HandleShape shape, HandleObjectGroup group, AutoSetNewObjectMetadata&); + + static inline ArrayObject* finishCreateArray( + ArrayObject* obj, HandleShape shape, AutoSetNewObjectMetadata& metadata); +}; + +} // namespace js + +#endif // vm_ArrayObject_h diff --git a/js/src/vm/AsyncFunction.cpp b/js/src/vm/AsyncFunction.cpp new file mode 100644 index 0000000000..65e3976f0e --- /dev/null +++ b/js/src/vm/AsyncFunction.cpp @@ -0,0 +1,287 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/AsyncFunction.h" + +#include "mozilla/Maybe.h" + +#include "builtin/ModuleObject.h" +#include "builtin/Promise.h" +#include "vm/FunctionFlags.h" // js::FunctionFlags +#include "vm/GeneratorObject.h" +#include "vm/GlobalObject.h" +#include "vm/Interpreter.h" +#include "vm/NativeObject.h" +#include "vm/PromiseObject.h" // js::PromiseObject +#include "vm/Realm.h" +#include "vm/SelfHosting.h" + +#include "vm/JSObject-inl.h" + +using namespace js; + +using mozilla::Maybe; + +static JSObject* CreateAsyncFunction(JSContext* cx, JSProtoKey key) { + RootedObject proto( + cx, GlobalObject::getOrCreateFunctionConstructor(cx, cx->global())); + if (!proto) { + return nullptr; + } + + HandlePropertyName name = cx->names().AsyncFunction; + return NewFunctionWithProto(cx, AsyncFunctionConstructor, 1, + FunctionFlags::NATIVE_CTOR, nullptr, name, proto, + gc::AllocKind::FUNCTION, TenuredObject); +} + +static JSObject* CreateAsyncFunctionPrototype(JSContext* cx, JSProtoKey key) { + return NewTenuredObjectWithFunctionPrototype(cx, cx->global()); +} + +static bool AsyncFunctionClassFinish(JSContext* cx, 
HandleObject asyncFunction, + HandleObject asyncFunctionProto) { + // Change the "constructor" property to non-writable before adding any other + // properties, so it's still the last property and can be modified without a + // dictionary-mode transition. + MOZ_ASSERT(StringEqualsAscii( + JSID_TO_LINEAR_STRING( + asyncFunctionProto->as().lastProperty()->propid()), + "constructor")); + MOZ_ASSERT(!asyncFunctionProto->as().inDictionaryMode()); + + RootedValue asyncFunctionVal(cx, ObjectValue(*asyncFunction)); + if (!DefineDataProperty(cx, asyncFunctionProto, cx->names().constructor, + asyncFunctionVal, JSPROP_READONLY)) { + return false; + } + MOZ_ASSERT(!asyncFunctionProto->as().inDictionaryMode()); + + return DefineToStringTag(cx, asyncFunctionProto, cx->names().AsyncFunction); +} + +static const ClassSpec AsyncFunctionClassSpec = { + CreateAsyncFunction, + CreateAsyncFunctionPrototype, + nullptr, + nullptr, + nullptr, + nullptr, + AsyncFunctionClassFinish, + ClassSpec::DontDefineConstructor}; + +const JSClass js::AsyncFunctionClass = {"AsyncFunction", 0, JS_NULL_CLASS_OPS, + &AsyncFunctionClassSpec}; + +enum class ResumeKind { Normal, Throw }; + +// ES2020 draft rev a09fc232c137800dbf51b6204f37fdede4ba1646 +// 6.2.3.1.1 Await Fulfilled Functions +// 6.2.3.1.2 Await Rejected Functions +static bool AsyncFunctionResume(JSContext* cx, + Handle generator, + ResumeKind kind, HandleValue valueOrReason) { + // We're enqueuing the promise job for Await before suspending the execution + // of the async function. So when either the debugger or OOM errors terminate + // the execution after JSOp::AsyncAwait, but before JSOp::Await, we're in an + // inconsistent state, because we don't have a resume index set and therefore + // don't know where to resume the async function. Return here in that case. 
+ if (generator->isClosed()) { + return true; + } + + // The debugger sets the async function's generator object into the "running" + // state while firing debugger events to ensure the debugger can't re-enter + // the async function, cf. |AutoSetGeneratorRunning| in Debugger.cpp. Catch + // this case here by checking if the generator is already runnning. + if (generator->isRunning()) { + return true; + } + + Rooted resultPromise(cx, generator->promise()); + + RootedObject stack(cx); + Maybe asyncStack; + if (JSObject* allocationSite = resultPromise->allocationSite()) { + // The promise is created within the activation of the async function, so + // use the parent frame as the starting point for async stacks. + stack = allocationSite->as().getParent(); + if (stack) { + asyncStack.emplace( + cx, stack, "async", + JS::AutoSetAsyncStackForNewCalls::AsyncCallKind::EXPLICIT); + } + } + + MOZ_ASSERT(generator->isSuspended(), + "non-suspended generator when resuming async function"); + + // Execution context switching is handled in generator. + HandlePropertyName funName = kind == ResumeKind::Normal + ? cx->names().AsyncFunctionNext + : cx->names().AsyncFunctionThrow; + FixedInvokeArgs<1> args(cx); + args[0].set(valueOrReason); + RootedValue generatorOrValue(cx, ObjectValue(*generator)); + if (!CallSelfHostedFunction(cx, funName, generatorOrValue, args, + &generatorOrValue)) { + if (!generator->isClosed()) { + generator->setClosed(); + } + + // Handle the OOM case mentioned above. 
+ if (resultPromise->state() == JS::PromiseState::Pending && + cx->isExceptionPending()) { + RootedValue exn(cx); + if (!GetAndClearException(cx, &exn)) { + return false; + } + return AsyncFunctionThrown(cx, resultPromise, exn); + } + return false; + } + + MOZ_ASSERT_IF(generator->isClosed(), generatorOrValue.isObject()); + MOZ_ASSERT_IF(generator->isClosed(), + &generatorOrValue.toObject() == resultPromise); + MOZ_ASSERT_IF(!generator->isClosed(), generator->isAfterAwait()); + + return true; +} + +// ES2020 draft rev a09fc232c137800dbf51b6204f37fdede4ba1646 +// 6.2.3.1.1 Await Fulfilled Functions +MOZ_MUST_USE bool js::AsyncFunctionAwaitedFulfilled( + JSContext* cx, Handle generator, + HandleValue value) { + return AsyncFunctionResume(cx, generator, ResumeKind::Normal, value); +} + +// ES2020 draft rev a09fc232c137800dbf51b6204f37fdede4ba1646 +// 6.2.3.1.2 Await Rejected Functions +MOZ_MUST_USE bool js::AsyncFunctionAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason) { + return AsyncFunctionResume(cx, generator, ResumeKind::Throw, reason); +} + +JSObject* js::AsyncFunctionResolve( + JSContext* cx, Handle generator, + HandleValue valueOrReason, AsyncFunctionResolveKind resolveKind) { + Rooted promise(cx, generator->promise()); + if (resolveKind == AsyncFunctionResolveKind::Fulfill) { + if (!AsyncFunctionReturned(cx, promise, valueOrReason)) { + return nullptr; + } + } else { + if (!AsyncFunctionThrown(cx, promise, valueOrReason)) { + return nullptr; + } + } + return promise; +} + +const JSClass AsyncFunctionGeneratorObject::class_ = { + "AsyncFunctionGenerator", + JSCLASS_HAS_RESERVED_SLOTS(AsyncFunctionGeneratorObject::RESERVED_SLOTS), + &classOps_, +}; + +const JSClassOps AsyncFunctionGeneratorObject::classOps_ = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + nullptr, // finalize + nullptr, // call + nullptr, // hasInstance + nullptr, 
// construct + CallTraceMethod, // trace +}; + +AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create( + JSContext* cx, HandleFunction fun) { + MOZ_ASSERT(fun->isAsync() && !fun->isGenerator()); + + Rooted resultPromise(cx, CreatePromiseObjectForAsync(cx)); + if (!resultPromise) { + return nullptr; + } + + auto* obj = NewBuiltinClassInstance(cx); + if (!obj) { + return nullptr; + } + obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise)); + + // Starts in the running state. + obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING); + + return obj; +} + +JSFunction* NewHandler(JSContext* cx, Native handler, + JS::Handle target) { + cx->check(target); + + JS::Handle funName = cx->names().empty; + JS::Rooted handlerFun( + cx, NewNativeFunction(cx, handler, 0, funName, + gc::AllocKind::FUNCTION_EXTENDED, GenericObject)); + if (!handlerFun) { + return nullptr; + } + handlerFun->setExtendedSlot(FunctionExtended::MODULE_SLOT, + JS::ObjectValue(*target)); + return handlerFun; +} + +AsyncFunctionGeneratorObject* AsyncFunctionGeneratorObject::create( + JSContext* cx, HandleModuleObject module) { + // TODO: Module is currently hitching a ride with + // AsyncFunctionGeneratorObject. The reason for this is we have some work in + // the JITs that make use of this object when we hit AsyncAwait bytecode. At + // the same time, top level await shares a lot of it's implementation with + // AsyncFunction. I am not sure if the best thing to do here is inherit, + // override, or do something else. Comments appreciated. 
+ MOZ_ASSERT(module->script()->isAsync()); + + Rooted resultPromise(cx, CreatePromiseObjectForAsync(cx)); + if (!resultPromise) { + return nullptr; + } + + Rooted obj( + cx, NewBuiltinClassInstance(cx)); + if (!obj) { + return nullptr; + } + obj->initFixedSlot(PROMISE_SLOT, ObjectValue(*resultPromise)); + + RootedObject onFulfilled( + cx, NewHandler(cx, AsyncModuleExecutionFulfilledHandler, module)); + if (!onFulfilled) { + return nullptr; + } + + RootedObject onRejected( + cx, NewHandler(cx, AsyncModuleExecutionRejectedHandler, module)); + if (!onRejected) { + return nullptr; + } + + if (!JS::AddPromiseReactionsIgnoringUnhandledRejection( + cx, resultPromise, onFulfilled, onRejected)) { + return nullptr; + } + + // Starts in the running state. + obj->setResumeIndex(AbstractGeneratorObject::RESUME_INDEX_RUNNING); + + return obj; +} diff --git a/js/src/vm/AsyncFunction.h b/js/src/vm/AsyncFunction.h new file mode 100644 index 0000000000..34737c5e8e --- /dev/null +++ b/js/src/vm/AsyncFunction.h @@ -0,0 +1,325 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_AsyncFunction_h +#define vm_AsyncFunction_h + +#include "js/Class.h" +#include "vm/AsyncFunctionResolveKind.h" // AsyncFunctionResolveKind +#include "vm/GeneratorObject.h" +#include "vm/JSContext.h" +#include "vm/JSObject.h" +#include "vm/PromiseObject.h" + +// [SMDOC] Async functions +// +// # Implementation +// +// Async functions are implemented based on generators, in terms of +// suspend/resume. +// Instead of returning the generator object itself, they return the async +// function's result promise to the caller. 
+// +// The async function's result promise is stored in the generator object +// (js::AsyncFunctionGeneratorObject) and retrieved from it whenever the +// execution needs it. +// +// +// # Start +// +// When an async function is called, it synchronously runs until the first +// `await` or `return`. This works just like a normal function. +// +// This corresponds to steps 1-3, 5-9 of AsyncFunctionStart. +// +// AsyncFunctionStart ( promiseCapability, asyncFunctionBody ) +// https://tc39.es/ecma262/#sec-async-functions-abstract-operations-async-function-start +// +// 1. Let runningContext be the running execution context. +// 2. Let asyncContext be a copy of runningContext. +// 3. NOTE: Copying the execution state is required for the step below to +// resume its execution. It is ill-defined to resume a currently executing +// context. +// ... +// 5. Push asyncContext onto the execution context stack; asyncContext is now +// the running execution context. +// 6. Resume the suspended evaluation of asyncContext. Let result be the value +// returned by the resumed computation. +// 7. Assert: When we return here, asyncContext has already been removed from +// the execution context stack and runningContext is the currently running +// execution context. +// 8. Assert: result is a normal completion with a value of undefined. The +// possible sources of completion values are Await or, if the async +// function doesn't await anything, step 4.g above. +// 9. Return. +// +// Unlike generators, async functions don't contain JSOp::InitialYield and +// don't suspend immediately when call. +// +// +// # Return +// +// Explicit/implicit `return` is implemented with the following bytecode +// sequence: +// +// ``` +// GetAliasedVar ".generator" # VALUE .generator +// AsyncResolve 0 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// JSOp::Resolve (js::AsyncFunctionResolve) resolves the current async +// function's result promise. 
Then this sets it as the function's return value. +// (The return value is observable if the caller is still on the stack-- +// that is, the async function is returning without ever awaiting. +// Otherwise we're returning to the microtask loop, which ignores the +// return value.) +// +// This corresponds to AsyncFunctionStart steps 4.a-e. 4.g. +// +// 4. Set the code evaluation state of asyncContext such that when evaluation +// is resumed for that execution context the following steps will be +// performed: +// a. Let result be the result of evaluating asyncFunctionBody. +// b. Assert: If we return here, the async function either threw an +// exception or performed an implicit or explicit return; all awaiting +// is done. +// c. Remove asyncContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// d. If result.[[Type]] is normal, then +// i. Perform +// ! Call(promiseCapability.[[Resolve]], undefined, «undefined»). +// e. Else if result.[[Type]] is return, then +// i. Perform +// ! Call(promiseCapability.[[Resolve]], undefined, +// «result.[[Value]]»). +// ... +// g. Return. +// +// +// # Throw +// +// The body part of an async function is enclosed by an implicit try-catch +// block, to catch `throw` completion of the function body. +// +// If an exception is thrown by the function body, the catch block catches it +// and rejects the async function's result promise. +// +// If there's an expression in parameters, the entire parameters part is also +// enclosed by a separate implicit try-catch block. 
+// +// ``` +// Try # +// (parameter expressions here) # +// Goto BODY # +// +// JumpTarget from try # +// Exception # EXCEPTION +// GetAliasedVar ".generator" # EXCEPTION .generator +// AsyncResolve 1 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// +// BODY: +// JumpTarget # +// Try # +// (body here) # +// +// JumpTarget from try # +// Exception # EXCEPTION +// GetAliasedVar ".generator" # EXCEPTION .generator +// AsyncResolve 1 # PROMISE +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// This corresponds to AsyncFunctionStart steps 4.f-g. +// +// 4. ... +// f. Else, +// i. Assert: result.[[Type]] is throw. +// ii. Perform +// ! Call(promiseCapability.[[Reject]], undefined, +// «result.[[Value]]»). +// g. Return. +// +// +// # Await +// +// `await` is implemented with the following bytecode sequence: +// (ignoring CanSkipAwait for now, see "Optimization for await" section) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// AsyncAwait # PROMISE +// +// GetAliasedVar ".generator" # PROMISE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// ``` +// +// JSOp::AsyncAwait corresponds to Await steps 1-9, and JSOp::Await corresponds +// to Await steps 10-12 in the spec. +// +// See the next section for JSOp::CheckResumeKind. +// +// After them, the async function is suspended, and if this is the first await +// in the execution, the async function's result promise is returned to the +// caller. +// +// Await +// https://tc39.es/ecma262/#await +// +// 1. Let asyncContext be the running execution context. +// 2. Let promise be ? PromiseResolve(%Promise%, value). +// 3. Let stepsFulfilled be the algorithm steps defined in Await Fulfilled +// Functions. +// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, « +// [[AsyncContext]] »). +// 5. 
Set onFulfilled.[[AsyncContext]] to asyncContext. +// 6. Let stepsRejected be the algorithm steps defined in Await Rejected +// Functions. +// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, « +// [[AsyncContext]] »). +// 8. Set onRejected.[[AsyncContext]] to asyncContext. +// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected). +// 10. Remove asyncContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// 11. Set the code evaluation state of asyncContext such that when evaluation +// is resumed with a Completion completion, the following steps of the +// algorithm that invoked Await will be performed, with completion +// available. +// 12. Return. +// 13. NOTE: This returns to the evaluation of the operation that had most +// previously resumed evaluation of asyncContext. +// +// (See comments above AsyncAwait and Await in js/src/vm/Opcodes.h for more +// details) +// +// +// # Reaction jobs and resume after await +// +// When an async function performs `await` and the operand becomes settled, a +// new reaction job for the operand is enqueued to the job queue. +// +// The reaction record for the job is marked as "this is for async function" +// (see js::AsyncFunctionAwait), and handled specially in +// js::PromiseReactionJob. +// +// When the await operand resolves (either with fulfillment or rejection), +// the async function is resumed from the job queue, by calling +// js::AsyncFunctionAwaitedFulfilled or js::AsyncFunctionAwaitedRejected +// from js::AsyncFunctionPromiseReactionJob. +// +// The execution resumes from JSOp::AfterYield, with the resolved value +// and the resume kind, either normal or throw, corresponds to fulfillment or +// rejection, on the stack. +// +// The resume kind is handled by JSOp::CheckResumeKind after that. 
+// +// If the resume kind is normal (=fulfillment), the async function resumes +// the execution with the resolved value as the result of `await`. +// +// If the resume kind is throw (=rejection), it throws the resolved value, +// and it will be caught by the try-catch explained above. +// +// +// # Optimization for await +// +// Suspending the execution and going into the embedding's job queue is slow +// and hard to optimize. +// +// If the following conditions are met, we don't have to perform the above +// but just use the await operand as the result of await. +// +// 1. The await operand is either non-promise or already-fulfilled promise, +// so that the result value is already known +// 2. There's no jobs in the job queue, +// so that we don't have to perform other jobs before resuming from +// await +// 3. Promise constructor/prototype are not modified, +// so that the optimization isn't visible to the user code +// +// This is implemented by the following bytecode sequence: +// +// ``` +// (operand here) # VALUE +// +// CanSkipAwait # VALUE, CAN_SKIP +// MaybeExtractAwaitValue # VALUE_OR_RVAL, CAN_SKIP +// IfNe END # VALUE +// +// JumpTarget # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// END: +// JumpTarget # RVAL +// ``` +// +// JSOp::CanSkipAwait checks the above conditions. MaybeExtractAwaitValue will +// replace Value if it can be skipped, and then the await is jumped over. + +namespace js { + +class AsyncFunctionGeneratorObject; + +extern const JSClass AsyncFunctionClass; + +// Resume the async function when the `await` operand resolves. +// Split into two functions depending on whether the awaited value was +// fulfilled or rejected. 
+MOZ_MUST_USE bool AsyncFunctionAwaitedFulfilled( + JSContext* cx, Handle generator, + HandleValue value); + +MOZ_MUST_USE bool AsyncFunctionAwaitedRejected( + JSContext* cx, Handle generator, + HandleValue reason); + +// Resolve the async function's promise object with the given value and then +// return the promise object. +JSObject* AsyncFunctionResolve(JSContext* cx, + Handle generator, + HandleValue valueOrReason, + AsyncFunctionResolveKind resolveKind); + +class AsyncFunctionGeneratorObject : public AbstractGeneratorObject { + public: + enum { + PROMISE_SLOT = AbstractGeneratorObject::RESERVED_SLOTS, + + RESERVED_SLOTS + }; + + static const JSClass class_; + static const JSClassOps classOps_; + + static AsyncFunctionGeneratorObject* create(JSContext* cx, + HandleFunction asyncGen); + + static AsyncFunctionGeneratorObject* create(JSContext* cx, + HandleModuleObject module); + + PromiseObject* promise() { + return &getFixedSlot(PROMISE_SLOT).toObject().as(); + } +}; + +} // namespace js + +#endif /* vm_AsyncFunction_h */ diff --git a/js/src/vm/AsyncFunctionResolveKind.h b/js/src/vm/AsyncFunctionResolveKind.h new file mode 100644 index 0000000000..75adfcec3c --- /dev/null +++ b/js/src/vm/AsyncFunctionResolveKind.h @@ -0,0 +1,18 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_AsyncFunctionResolveKind_h +#define vm_AsyncFunctionResolveKind_h + +#include // uint8_t + +namespace js { + +enum class AsyncFunctionResolveKind : uint8_t { Fulfill, Reject }; + +} // namespace js + +#endif /* vm_AsyncFunctionResolveKind_h */ diff --git a/js/src/vm/AsyncIteration.cpp b/js/src/vm/AsyncIteration.cpp new file mode 100644 index 0000000000..64708c2dbf --- /dev/null +++ b/js/src/vm/AsyncIteration.cpp @@ -0,0 +1,700 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/AsyncIteration.h" + +#include "builtin/Array.h" + +#include "builtin/Promise.h" // js::AsyncFromSyncIteratorMethod, js::AsyncGeneratorEnqueue +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/PropertySpec.h" +#include "vm/FunctionFlags.h" // js::FunctionFlags +#include "vm/GeneratorObject.h" +#include "vm/GlobalObject.h" +#include "vm/Interpreter.h" +#include "vm/PlainObject.h" // js::PlainObject +#include "vm/PromiseObject.h" // js::PromiseObject +#include "vm/Realm.h" +#include "vm/SelfHosting.h" + +#include "vm/JSContext-inl.h" +#include "vm/JSObject-inl.h" +#include "vm/List-inl.h" + +using namespace js; + +// ES2019 draft rev 49b781ec80117b60f73327ef3054703a3111e40c +// 6.2.3.1.1 Await Fulfilled Functions +MOZ_MUST_USE bool js::AsyncGeneratorAwaitedFulfilled( + JSContext* cx, Handle asyncGenObj, + HandleValue value) { + return AsyncGeneratorResume(cx, asyncGenObj, CompletionKind::Normal, value); +} + +// ES2019 draft rev 49b781ec80117b60f73327ef3054703a3111e40c +// 6.2.3.1.2 Await Rejected Functions +MOZ_MUST_USE bool js::AsyncGeneratorAwaitedRejected( + JSContext* cx, Handle asyncGenObj, + HandleValue reason) { + return AsyncGeneratorResume(cx, 
asyncGenObj, CompletionKind::Throw, reason); +} + +// ES2019 draft rev 49b781ec80117b60f73327ef3054703a3111e40c +// 25.5.3.7 AsyncGeneratorYield, step 8.e. +MOZ_MUST_USE bool js::AsyncGeneratorYieldReturnAwaitedFulfilled( + JSContext* cx, Handle asyncGenObj, + HandleValue value) { + return AsyncGeneratorResume(cx, asyncGenObj, CompletionKind::Return, value); +} + +// ES2019 draft rev 49b781ec80117b60f73327ef3054703a3111e40c +// 25.5.3.7 AsyncGeneratorYield, step 8.c. +MOZ_MUST_USE bool js::AsyncGeneratorYieldReturnAwaitedRejected( + JSContext* cx, Handle asyncGenObj, + HandleValue reason) { + return AsyncGeneratorResume(cx, asyncGenObj, CompletionKind::Throw, reason); +} + +const JSClass AsyncFromSyncIteratorObject::class_ = { + "AsyncFromSyncIteratorObject", + JSCLASS_HAS_RESERVED_SLOTS(AsyncFromSyncIteratorObject::Slots)}; + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.1 CreateAsyncFromSyncIterator +JSObject* js::CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter, + HandleValue nextMethod) { + // Steps 1-3. + return AsyncFromSyncIteratorObject::create(cx, iter, nextMethod); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.1 CreateAsyncFromSyncIterator +/* static */ +JSObject* AsyncFromSyncIteratorObject::create(JSContext* cx, HandleObject iter, + HandleValue nextMethod) { + // Step 1. + RootedObject proto(cx, + GlobalObject::getOrCreateAsyncFromSyncIteratorPrototype( + cx, cx->global())); + if (!proto) { + return nullptr; + } + + AsyncFromSyncIteratorObject* asyncIter = + NewObjectWithGivenProto(cx, proto); + if (!asyncIter) { + return nullptr; + } + + // Step 2. + asyncIter->init(iter, nextMethod); + + // Step 3 (Call to 7.4.1 GetIterator). + // 7.4.1 GetIterator, steps 1-5 are a no-op (*). + // 7.4.1 GetIterator, steps 6-8 are implemented in bytecode. + // + // (*) With fixed. 
+ return asyncIter; +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.1 %AsyncFromSyncIteratorPrototype%.next +static bool AsyncFromSyncIteratorNext(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Normal); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.2 %AsyncFromSyncIteratorPrototype%.return +static bool AsyncFromSyncIteratorReturn(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Return); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.1.4.2.3 %AsyncFromSyncIteratorPrototype%.throw +static bool AsyncFromSyncIteratorThrow(JSContext* cx, unsigned argc, + Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + return AsyncFromSyncIteratorMethod(cx, args, CompletionKind::Throw); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.1.2 AsyncGenerator.prototype.next +bool js::AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Steps 1-3. + return AsyncGeneratorEnqueue(cx, args.thisv(), CompletionKind::Normal, + args.get(0), args.rval()); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.1.3 AsyncGenerator.prototype.return +bool js::AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Steps 1-3. + return AsyncGeneratorEnqueue(cx, args.thisv(), CompletionKind::Return, + args.get(0), args.rval()); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.1.4 AsyncGenerator.prototype.throw +bool js::AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Steps 1-3. 
+ return AsyncGeneratorEnqueue(cx, args.thisv(), CompletionKind::Throw, + args.get(0), args.rval()); +} + +const JSClass AsyncGeneratorObject::class_ = { + "AsyncGenerator", + JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorObject::Slots), + &classOps_, +}; + +const JSClassOps AsyncGeneratorObject::classOps_ = { + nullptr, // addProperty + nullptr, // delProperty + nullptr, // enumerate + nullptr, // newEnumerate + nullptr, // resolve + nullptr, // mayResolve + nullptr, // finalize + nullptr, // call + nullptr, // hasInstance + nullptr, // construct + CallTraceMethod, // trace +}; + +// ES 2017 draft 9.1.13. +// OrdinaryCreateFromConstructor specialized for AsyncGeneratorObjects. +static AsyncGeneratorObject* OrdinaryCreateFromConstructorAsynGen( + JSContext* cx, HandleFunction fun) { + // Step 1 (skipped). + + // Step 2. + RootedValue protoVal(cx); + if (!GetProperty(cx, fun, fun, cx->names().prototype, &protoVal)) { + return nullptr; + } + + RootedObject proto(cx, protoVal.isObject() ? &protoVal.toObject() : nullptr); + if (!proto) { + proto = GlobalObject::getOrCreateAsyncGeneratorPrototype(cx, cx->global()); + if (!proto) { + return nullptr; + } + } + + // Step 3. + return NewObjectWithGivenProto(cx, proto); +} + +/* static */ +AsyncGeneratorObject* AsyncGeneratorObject::create(JSContext* cx, + HandleFunction asyncGen) { + MOZ_ASSERT(asyncGen->isAsync() && asyncGen->isGenerator()); + + AsyncGeneratorObject* asyncGenObj = + OrdinaryCreateFromConstructorAsynGen(cx, asyncGen); + if (!asyncGenObj) { + return nullptr; + } + + // ES2019 draft rev c2aad21fee7f5ddc89fdf7d3d305618ca3a13242 + // 25.5.3.2 AsyncGeneratorStart. + + // Step 7. + asyncGenObj->setSuspendedStart(); + + // Step 8. 
+ asyncGenObj->clearSingleQueueRequest(); + + asyncGenObj->clearCachedRequest(); + + return asyncGenObj; +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::createRequest( + JSContext* cx, Handle asyncGenObj, + CompletionKind completionKind, HandleValue completionValue, + Handle promise) { + if (!asyncGenObj->hasCachedRequest()) { + return AsyncGeneratorRequest::create(cx, completionKind, completionValue, + promise); + } + + AsyncGeneratorRequest* request = asyncGenObj->takeCachedRequest(); + request->init(completionKind, completionValue, promise); + return request; +} + +/* static */ MOZ_MUST_USE bool AsyncGeneratorObject::enqueueRequest( + JSContext* cx, Handle asyncGenObj, + Handle request) { + if (asyncGenObj->isSingleQueue()) { + if (asyncGenObj->isSingleQueueEmpty()) { + asyncGenObj->setSingleQueueRequest(request); + return true; + } + + Rooted queue(cx, ListObject::create(cx)); + if (!queue) { + return false; + } + + RootedValue requestVal(cx, ObjectValue(*asyncGenObj->singleQueueRequest())); + if (!queue->append(cx, requestVal)) { + return false; + } + requestVal = ObjectValue(*request); + if (!queue->append(cx, requestVal)) { + return false; + } + + asyncGenObj->setQueue(queue); + return true; + } + + Rooted queue(cx, asyncGenObj->queue()); + RootedValue requestVal(cx, ObjectValue(*request)); + return queue->append(cx, requestVal); +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::dequeueRequest( + JSContext* cx, Handle asyncGenObj) { + if (asyncGenObj->isSingleQueue()) { + AsyncGeneratorRequest* request = asyncGenObj->singleQueueRequest(); + asyncGenObj->clearSingleQueueRequest(); + return request; + } + + Rooted queue(cx, asyncGenObj->queue()); + return &queue->popFirstAs(cx); +} + +/* static */ +AsyncGeneratorRequest* AsyncGeneratorObject::peekRequest( + Handle asyncGenObj) { + if (asyncGenObj->isSingleQueue()) { + return asyncGenObj->singleQueueRequest(); + } + + return &asyncGenObj->queue()->getAs(0); +} + +const JSClass 
AsyncGeneratorRequest::class_ = { + "AsyncGeneratorRequest", + JSCLASS_HAS_RESERVED_SLOTS(AsyncGeneratorRequest::Slots)}; + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.3.1 AsyncGeneratorRequest Records +/* static */ +AsyncGeneratorRequest* AsyncGeneratorRequest::create( + JSContext* cx, CompletionKind completionKind, HandleValue completionValue, + Handle promise) { + AsyncGeneratorRequest* request = + NewObjectWithGivenProto(cx, nullptr); + if (!request) { + return nullptr; + } + + request->init(completionKind, completionValue, promise); + return request; +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.3.2 AsyncGeneratorStart +static MOZ_MUST_USE bool AsyncGeneratorReturned( + JSContext* cx, Handle asyncGenObj, + HandleValue value) { + // Step 5.d. + asyncGenObj->setCompleted(); + + // Step 5.e (done in bytecode). + // Step 5.f.i (implicit). + + // Step 5.g. + return AsyncGeneratorResolve(cx, asyncGenObj, value, true); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.3.2 AsyncGeneratorStart +static MOZ_MUST_USE bool AsyncGeneratorThrown( + JSContext* cx, Handle asyncGenObj) { + // Step 5.d. + asyncGenObj->setCompleted(); + + // Not much we can do about uncatchable exceptions, so just bail. + if (!cx->isExceptionPending()) { + return false; + } + + // Step 5.f.i. + RootedValue value(cx); + if (!GetAndClearException(cx, &value)) { + return false; + } + + // Step 5.f.ii. + return AsyncGeneratorReject(cx, asyncGenObj, value); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 25.5.3.7 AsyncGeneratorYield (partially) +// Most steps are done in generator. +static MOZ_MUST_USE bool AsyncGeneratorYield( + JSContext* cx, Handle asyncGenObj, + HandleValue value) { + // Step 5 is done in bytecode. + + // Step 6. + asyncGenObj->setSuspendedYield(); + + // Step 9. 
+ return AsyncGeneratorResolve(cx, asyncGenObj, value, false); +} + +// ES2019 draft rev c012f9c70847559a1d9dc0d35d35b27fec42911e +// 6.2.3.1 Await, steps 2-9. +// 14.4.13 RS: Evaluation, yield*, steps 7.a.vi, 7.b.ii.7, 7.c.ix. +// 25.5.3.2 AsyncGeneratorStart, steps 5.d-g. +// 25.5.3.5 AsyncGeneratorResumeNext, steps 12-20. +// 25.5.3.7 AsyncGeneratorYield, steps 5-6, 9. +// +// Note: Execution context switching is handled in generator. +MOZ_MUST_USE bool js::AsyncGeneratorResume( + JSContext* cx, Handle asyncGenObj, + CompletionKind completionKind, HandleValue argument) { + MOZ_ASSERT(!asyncGenObj->isClosed(), + "closed generator when resuming async generator"); + MOZ_ASSERT(asyncGenObj->isSuspended(), + "non-suspended generator when resuming async generator"); + MOZ_ASSERT(asyncGenObj->isExecuting(), + "async generator not set into 'executing' state"); + + // 25.5.3.5, steps 12-14, 16-20. + HandlePropertyName funName = completionKind == CompletionKind::Normal + ? cx->names().AsyncGeneratorNext + : completionKind == CompletionKind::Throw + ? cx->names().AsyncGeneratorThrow + : cx->names().AsyncGeneratorReturn; + FixedInvokeArgs<1> args(cx); + args[0].set(argument); + RootedValue thisOrRval(cx, ObjectValue(*asyncGenObj)); + if (!CallSelfHostedFunction(cx, funName, thisOrRval, args, &thisOrRval)) { + // 25.5.3.2, steps 5.f, 5.g. + if (!asyncGenObj->isClosed()) { + asyncGenObj->setClosed(); + } + return AsyncGeneratorThrown(cx, asyncGenObj); + } + + // 6.2.3.1, steps 2-9. + if (asyncGenObj->isAfterAwait()) { + return AsyncGeneratorAwait(cx, asyncGenObj, thisOrRval); + } + + // 25.5.3.7, steps 5-6, 9. + if (asyncGenObj->isAfterYield()) { + return AsyncGeneratorYield(cx, asyncGenObj, thisOrRval); + } + + // 25.5.3.2, steps 5.d-g. 
+ return AsyncGeneratorReturned(cx, asyncGenObj, thisOrRval); +} + +static const JSFunctionSpec async_iterator_proto_methods[] = { + JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0), + JS_FS_END}; + +static const JSFunctionSpec async_iterator_proto_methods_with_helpers[] = { + JS_SELF_HOSTED_FN("map", "AsyncIteratorMap", 1, 0), + JS_SELF_HOSTED_FN("filter", "AsyncIteratorFilter", 1, 0), + JS_SELF_HOSTED_FN("take", "AsyncIteratorTake", 1, 0), + JS_SELF_HOSTED_FN("drop", "AsyncIteratorDrop", 1, 0), + JS_SELF_HOSTED_FN("asIndexedPairs", "AsyncIteratorAsIndexedPairs", 0, 0), + JS_SELF_HOSTED_FN("flatMap", "AsyncIteratorFlatMap", 1, 0), + JS_SELF_HOSTED_FN("reduce", "AsyncIteratorReduce", 1, 0), + JS_SELF_HOSTED_FN("toArray", "AsyncIteratorToArray", 0, 0), + JS_SELF_HOSTED_FN("forEach", "AsyncIteratorForEach", 1, 0), + JS_SELF_HOSTED_FN("some", "AsyncIteratorSome", 1, 0), + JS_SELF_HOSTED_FN("every", "AsyncIteratorEvery", 1, 0), + JS_SELF_HOSTED_FN("find", "AsyncIteratorFind", 1, 0), + JS_SELF_HOSTED_SYM_FN(asyncIterator, "AsyncIteratorIdentity", 0, 0), + JS_FS_END}; + +static const JSFunctionSpec async_from_sync_iter_methods[] = { + JS_FN("next", AsyncFromSyncIteratorNext, 1, 0), + JS_FN("throw", AsyncFromSyncIteratorThrow, 1, 0), + JS_FN("return", AsyncFromSyncIteratorReturn, 1, 0), JS_FS_END}; + +static const JSFunctionSpec async_generator_methods[] = { + JS_FN("next", js::AsyncGeneratorNext, 1, 0), + JS_FN("throw", js::AsyncGeneratorThrow, 1, 0), + JS_FN("return", js::AsyncGeneratorReturn, 1, 0), JS_FS_END}; + +bool GlobalObject::initAsyncIteratorProto(JSContext* cx, + Handle global) { + if (global->getReservedSlot(ASYNC_ITERATOR_PROTO).isObject()) { + return true; + } + + // 25.1.3 The %AsyncIteratorPrototype% Object + RootedObject asyncIterProto( + cx, GlobalObject::createBlankPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncIterProto, nullptr, + async_iterator_proto_methods)) { + 
return false; + } + + global->setReservedSlot(ASYNC_ITERATOR_PROTO, ObjectValue(*asyncIterProto)); + return true; +} + +bool GlobalObject::initAsyncFromSyncIteratorProto( + JSContext* cx, Handle global) { + if (global->getReservedSlot(ASYNC_FROM_SYNC_ITERATOR_PROTO).isObject()) { + return true; + } + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + + // 25.1.4.2 The %AsyncFromSyncIteratorPrototype% Object + RootedObject asyncFromSyncIterProto( + cx, GlobalObject::createBlankPrototypeInheriting(cx, &PlainObject::class_, + asyncIterProto)); + if (!asyncFromSyncIterProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncFromSyncIterProto, nullptr, + async_from_sync_iter_methods) || + !DefineToStringTag(cx, asyncFromSyncIterProto, + cx->names().AsyncFromSyncIterator)) { + return false; + } + + global->setReservedSlot(ASYNC_FROM_SYNC_ITERATOR_PROTO, + ObjectValue(*asyncFromSyncIterProto)); + return true; +} + +static JSObject* CreateAsyncGeneratorFunction(JSContext* cx, JSProtoKey key) { + RootedObject proto( + cx, GlobalObject::getOrCreateFunctionConstructor(cx, cx->global())); + if (!proto) { + return nullptr; + } + HandlePropertyName name = cx->names().AsyncGeneratorFunction; + + // 25.3.1 The AsyncGeneratorFunction Constructor + return NewFunctionWithProto(cx, AsyncGeneratorConstructor, 1, + FunctionFlags::NATIVE_CTOR, nullptr, name, proto, + gc::AllocKind::FUNCTION, TenuredObject); +} + +static JSObject* CreateAsyncGeneratorFunctionPrototype(JSContext* cx, + JSProtoKey key) { + return NewTenuredObjectWithFunctionPrototype(cx, cx->global()); +} + +static bool AsyncGeneratorFunctionClassFinish(JSContext* cx, + HandleObject asyncGenFunction, + HandleObject asyncGenerator) { + Handle global = cx->global(); + + // Change the "constructor" property to non-writable before adding any other + // properties, so it's still the last property and can be modified 
without a + // dictionary-mode transition. + MOZ_ASSERT(StringEqualsAscii( + JSID_TO_LINEAR_STRING( + asyncGenerator->as().lastProperty()->propid()), + "constructor")); + MOZ_ASSERT(!asyncGenerator->as().inDictionaryMode()); + + RootedValue asyncGenFunctionVal(cx, ObjectValue(*asyncGenFunction)); + if (!DefineDataProperty(cx, asyncGenerator, cx->names().constructor, + asyncGenFunctionVal, JSPROP_READONLY)) { + return false; + } + MOZ_ASSERT(!asyncGenerator->as().inDictionaryMode()); + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + + // 25.5 AsyncGenerator Objects + RootedObject asyncGenProto(cx, GlobalObject::createBlankPrototypeInheriting( + cx, &PlainObject::class_, asyncIterProto)); + if (!asyncGenProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncGenProto, nullptr, + async_generator_methods) || + !DefineToStringTag(cx, asyncGenProto, cx->names().AsyncGenerator)) { + return false; + } + + // 25.3.3 Properties of the AsyncGeneratorFunction Prototype Object + if (!LinkConstructorAndPrototype(cx, asyncGenerator, asyncGenProto, + JSPROP_READONLY, JSPROP_READONLY) || + !DefineToStringTag(cx, asyncGenerator, + cx->names().AsyncGeneratorFunction)) { + return false; + } + + global->setAsyncGeneratorPrototype(asyncGenProto); + + return true; +} + +static const ClassSpec AsyncGeneratorFunctionClassSpec = { + CreateAsyncGeneratorFunction, + CreateAsyncGeneratorFunctionPrototype, + nullptr, + nullptr, + nullptr, + nullptr, + AsyncGeneratorFunctionClassFinish, + ClassSpec::DontDefineConstructor}; + +const JSClass js::AsyncGeneratorFunctionClass = { + "AsyncGeneratorFunction", 0, JS_NULL_CLASS_OPS, + &AsyncGeneratorFunctionClassSpec}; + +// https://tc39.es/proposal-iterator-helpers/#sec-asynciterator as of revision +// 8f10db5. 
+static bool AsyncIteratorConstructor(JSContext* cx, unsigned argc, Value* vp) { + CallArgs args = CallArgsFromVp(argc, vp); + + // Step 1. + if (!ThrowIfNotConstructing(cx, args, js_AsyncIterator_str)) { + return false; + } + // Throw TypeError if NewTarget is the active function object, preventing the + // Iterator constructor from being used directly. + if (args.callee() == args.newTarget().toObject()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BOGUS_CONSTRUCTOR, js_AsyncIterator_str); + return false; + } + + // Step 2. + RootedObject proto(cx); + if (!GetPrototypeFromBuiltinConstructor(cx, args, JSProto_AsyncIterator, + &proto)) { + return false; + } + + JSObject* obj = NewObjectWithClassProto(cx, proto); + if (!obj) { + return false; + } + + args.rval().setObject(*obj); + return true; +} + +static const ClassSpec AsyncIteratorObjectClassSpec = { + GenericCreateConstructor, + GenericCreatePrototype, + nullptr, + nullptr, + async_iterator_proto_methods_with_helpers, + nullptr, + nullptr, +}; + +const JSClass AsyncIteratorObject::class_ = { + js_AsyncIterator_str, + JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator), + JS_NULL_CLASS_OPS, + &AsyncIteratorObjectClassSpec, +}; + +const JSClass AsyncIteratorObject::protoClass_ = { + "AsyncIterator.prototype", + JSCLASS_HAS_CACHED_PROTO(JSProto_AsyncIterator), + JS_NULL_CLASS_OPS, + &AsyncIteratorObjectClassSpec, +}; + +// Iterator Helper proposal +static const JSFunctionSpec async_iterator_helper_methods[] = { + JS_SELF_HOSTED_FN("next", "AsyncIteratorHelperNext", 1, 0), + JS_SELF_HOSTED_FN("return", "AsyncIteratorHelperReturn", 1, 0), + JS_SELF_HOSTED_FN("throw", "AsyncIteratorHelperThrow", 1, 0), + JS_FS_END, +}; + +static const JSClass AsyncIteratorHelperPrototypeClass = { + "Async Iterator Helper", 0}; + +const JSClass AsyncIteratorHelperObject::class_ = { + "Async Iterator Helper", + JSCLASS_HAS_RESERVED_SLOTS(AsyncIteratorHelperObject::SlotCount), +}; + +/* static */ +NativeObject* 
GlobalObject::getOrCreateAsyncIteratorHelperPrototype( + JSContext* cx, Handle global) { + return MaybeNativeObject(getOrCreateObject( + cx, global, ASYNC_ITERATOR_HELPER_PROTO, initAsyncIteratorHelperProto)); +} + +/* static */ +bool GlobalObject::initAsyncIteratorHelperProto(JSContext* cx, + Handle global) { + if (global->getReservedSlot(ASYNC_ITERATOR_HELPER_PROTO).isObject()) { + return true; + } + + RootedObject asyncIterProto( + cx, GlobalObject::getOrCreateAsyncIteratorPrototype(cx, global)); + if (!asyncIterProto) { + return false; + } + + RootedObject asyncIteratorHelperProto( + cx, GlobalObject::createBlankPrototypeInheriting( + cx, &AsyncIteratorHelperPrototypeClass, asyncIterProto)); + if (!asyncIteratorHelperProto) { + return false; + } + if (!DefinePropertiesAndFunctions(cx, asyncIteratorHelperProto, nullptr, + async_iterator_helper_methods)) { + return false; + } + + global->setReservedSlot(ASYNC_ITERATOR_HELPER_PROTO, + ObjectValue(*asyncIteratorHelperProto)); + return true; +} + +AsyncIteratorHelperObject* js::NewAsyncIteratorHelper(JSContext* cx) { + RootedObject proto(cx, GlobalObject::getOrCreateAsyncIteratorHelperPrototype( + cx, cx->global())); + if (!proto) { + return nullptr; + } + return NewObjectWithGivenProto(cx, proto); +} diff --git a/js/src/vm/AsyncIteration.h b/js/src/vm/AsyncIteration.h new file mode 100644 index 0000000000..6249251913 --- /dev/null +++ b/js/src/vm/AsyncIteration.h @@ -0,0 +1,591 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_AsyncIteration_h +#define vm_AsyncIteration_h + +#include "builtin/SelfHostingDefines.h" +#include "js/Class.h" +#include "vm/GeneratorObject.h" +#include "vm/JSContext.h" +#include "vm/JSObject.h" +#include "vm/List.h" +#include "vm/PromiseObject.h" + +// [SMDOC] Async generators +// +// # Start +// +// When an async generator is called, it synchronously runs until the +// JSOp::InitialYield and then suspends, just like a sync generator, and returns +// an async generator object (js::AsyncGeneratorObject). +// +// +// # Request queue +// +// When next/return/throw is called on the async generator object, +// js::AsyncGeneratorEnqueue performs the following: +// * Create a new AsyncGeneratorRequest and enqueue it in the generator +// object's request queue. +// * Resume the generator with the oldest request, if the generator is +// suspended (see "Resume" section below) +// * Return the promise for the request +// +// This is done in js::AsyncGeneratorEnqueue, which corresponds to +// AsyncGeneratorEnqueue in the spec, +// and js::AsyncGeneratorResumeNext corresponds to the following: +// * AsyncGeneratorResolve +// * AsyncGeneratorReject +// * AsyncGeneratorResumeNext +// +// The returned promise is resolved when the resumption for the request +// completes with yield/throw/return, in js::AsyncGeneratorResolve and +// js::AsyncGeneratorReject. +// They correspond to AsyncGeneratorResolve and AsyncGeneratorReject in the +// spec. +// +// +// # Await +// +// Async generator's `await` is implemented differently than async function's +// `await`. 
+// +// The bytecode is the following: +// (ignoring CanSkipAwait; see the comment in AsyncFunction.h for more details) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// ``` +// +// Async generators don't use JSOp::AsyncAwait, and that part is handled +// in js::AsyncGeneratorResume, and js::AsyncGeneratorAwait called there. +// +// Both JSOp::Await and JSOp::Yield behave in the exactly same way, +// and js::AsyncGeneratorResume checks the last opcode and branches for +// await/yield/return cases. +// +// +// # Reaction jobs and resume after await +// +// This is almost same as for async functions (see AsyncFunction.h). +// +// The reaction record for the job is marked as "this is for async generator" +// (see js::AsyncGeneratorAwait), and handled specially in +// js::PromiseReactionJob, which calls js::AsyncGeneratorPromiseReactionJob. +// +// +// # Yield +// +// `yield` is implemented with the following bytecode sequence: +// (Ignoring CanSkipAwait for simplicity) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 1 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// GetAliasedVar ".generator" # RVAL .generator +// Yield 2 # RVAL2 GENERATOR RESUMEKIND +// +// AfterYield # RVAL2 GENERATOR RESUMEKIND +// CheckResumeKind # RVAL2 +// ``` +// +// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs an implicit +// `await`, as specified in AsyncGeneratorYield step 5. +// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 5. Set value to ? Await(value). +// +// The 2nd part (JSOp::Yield) suspends execution and yields the result of +// `await`, as specified in AsyncGeneratorYield steps 1-4, 6-7, 9-10. 
+// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 1. Let genContext be the running execution context. +// 2. Assert: genContext is the execution context of a generator. +// 3. Let generator be the value of the Generator component of genContext. +// 4. Assert: GetGeneratorKind() is async. +// .. +// 6. Set generator.[[AsyncGeneratorState]] to suspendedYield. +// 7. Remove genContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack as +// the running execution context. +// 8. ... +// 9. Return ! AsyncGeneratorResolve(generator, value, false). +// 10. NOTE: This returns to the evaluation of the operation that had most +// previously resumed evaluation of genContext. +// +// The last part (JSOp::CheckResumeKind) checks the resumption type and +// resumes/throws/returns the execution, as specified in AsyncGeneratorYield +// step 8. +// +// 8. Set the code evaluation state of genContext such that when evaluation is +// resumed with a Completion resumptionValue the following steps will be +// performed: +// a. If resumptionValue.[[Type]] is not return, return +// Completion(resumptionValue). +// b. Let awaited be Await(resumptionValue.[[Value]]). +// c. If awaited.[[Type]] is throw, return Completion(awaited). +// d. Assert: awaited.[[Type]] is normal. +// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]], +// [[Target]]: empty }. +// f. NOTE: When one of the above steps returns, it returns to the +// evaluation of the YieldExpression production that originally called +// this abstract operation. +// +// Resumption with `AsyncGenerator.prototype.return` is handled differently. +// See "Resumption with return" section below. 
+// +// +// # Return +// +// `return` with operand is implemented with the following bytecode sequence: +// (Ignoring CanSkipAwait for simplicity) +// +// ``` +// (operand here) # VALUE +// GetAliasedVar ".generator" # VALUE .generator +// Await 0 # RVAL GENERATOR RESUMEKIND +// AfterYield # RVAL GENERATOR RESUMEKIND +// CheckResumeKind # RVAL +// +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// The 1st part (JSOp::Await + JSOp::CheckResumeKind) performs implicit +// `await`, as specified in ReturnStatement's Evaluation step 3. +// +// ReturnStatement: return Expression; +// https://tc39.es/ecma262/#sec-return-statement-runtime-semantics-evaluation +// +// 3. If ! GetGeneratorKind() is async, set exprValue to ? Await(exprValue). +// +// And the 2nd part corresponds to AsyncGeneratorStart steps 5.a-e, 5.g. +// +// AsyncGeneratorStart ( generator, generatorBody ) +// https://tc39.es/ecma262/#sec-asyncgeneratorstart +// +// 5. Set the code evaluation state of genContext such that when evaluation +// is resumed for that execution context the following steps will be +// performed: +// a. Let result be the result of evaluating generatorBody. +// b. Assert: If we return here, the async generator either threw an +// exception or performed either an implicit or explicit return. +// c. Remove genContext from the execution context stack and restore the +// execution context that is at the top of the execution context stack +// as the running execution context. +// d. Set generator.[[AsyncGeneratorState]] to completed. +// e. If result is a normal completion, let resultValue be undefined. +// ... +// g. Return ! AsyncGeneratorResolve(generator, resultValue, true). 
+// +// `return` without operand or implicit return is implicit with the following +// bytecode sequence: +// +// ``` +// Undefined # undefined +// SetRval # +// GetAliasedVar ".generator" # .generator +// FinalYieldRval # +// ``` +// +// This is also AsyncGeneratorStart steps 5.a-e, 5.g. +// +// +// # Throw +// +// Unlike async function, async generator doesn't use implicit try-catch, +// but the throw completion is handled by js::AsyncGeneratorResume, +// and js::AsyncGeneratorThrown is called there. +// +// 5. ... +// f. Else, +// i. Let resultValue be result.[[Value]]. +// ii. If result.[[Type]] is not return, then +// 1. Return ! AsyncGeneratorReject(generator, resultValue). +// +// +// # Resumption with return +// +// Resumption with return completion is handled in js::AsyncGeneratorResumeNext. +// +// If the generator is suspended, it doesn't immediately resume the generator +// script itself, but handles implicit `await` it in +// js::AsyncGeneratorResumeNext. +// (See PromiseHandlerAsyncGeneratorYieldReturnAwaitedFulfilled and +// PromiseHandlerAsyncGeneratorYieldReturnAwaitedRejected), and resumes the +// generator with the result of await. +// And the return completion is finally handled in JSOp::CheckResumeKind +// after JSOp::Yield. +// +// This corresponds to AsyncGeneratorYield step 8. +// +// AsyncGeneratorYield ( value ) +// https://tc39.es/ecma262/#sec-asyncgeneratoryield +// +// 8. Set the code evaluation state of genContext such that when evaluation +// is resumed with a Completion resumptionValue the following steps will +// be performed: +// .. +// b. Let awaited be Await(resumptionValue.[[Value]]). +// c. If awaited.[[Type]] is throw, return Completion(awaited). +// d. Assert: awaited.[[Type]] is normal. +// e. Return Completion { [[Type]]: return, [[Value]]: awaited.[[Value]], +// [[Target]]: empty }. 
+// +// If the generator is already completed, it awaits on the return value, +// (See PromiseHandlerAsyncGeneratorResumeNextReturnFulfilled and +// PromiseHandlerAsyncGeneratorResumeNextReturnRejected), and resolves the +// request's promise with the value. +// +// It corresponds to AsyncGeneratorResumeNext step 10.b.i. +// +// AsyncGeneratorResumeNext ( generator ) +// https://tc39.es/ecma262/#sec-asyncgeneratorresumenext +// +// 10. If completion is an abrupt completion, then +// .. +// b. If state is completed, then +// i. If completion.[[Type]] is return, then +// 1. Set generator.[[AsyncGeneratorState]] to awaiting-return. +// 2. Let promise be ? PromiseResolve(%Promise%, completion.[[Value]]). +// 3. Let stepsFulfilled be the algorithm steps defined in +// AsyncGeneratorResumeNext Return Processor Fulfilled Functions. +// 4. Let onFulfilled be ! CreateBuiltinFunction(stepsFulfilled, « +// [[Generator]] »). +// 5. Set onFulfilled.[[Generator]] to generator. +// 6. Let stepsRejected be the algorithm steps defined in +// AsyncGeneratorResumeNext Return Processor Rejected Functions. +// 7. Let onRejected be ! CreateBuiltinFunction(stepsRejected, « +// [[Generator]] »). +// 8. Set onRejected.[[Generator]] to generator. +// 9. Perform ! PerformPromiseThen(promise, onFulfilled, onRejected). +// 10. Return undefined. +// + +namespace js { + +class AsyncGeneratorObject; + +extern const JSClass AsyncGeneratorFunctionClass; + +// Resume the async generator when the `await` operand fulfills to `value`. +MOZ_MUST_USE bool AsyncGeneratorAwaitedFulfilled( + JSContext* cx, Handle asyncGenObj, + HandleValue value); + +// Resume the async generator when the `await` operand rejects with `reason`. +MOZ_MUST_USE bool AsyncGeneratorAwaitedRejected( + JSContext* cx, Handle asyncGenObj, + HandleValue reason); + +// Resume the async generator after awaiting on the value passed to +// AsyncGenerator#return, when the async generator was still executing. 
+// Split into two functions depending on whether the awaited value was +// fulfilled or rejected. +MOZ_MUST_USE bool AsyncGeneratorYieldReturnAwaitedFulfilled( + JSContext* cx, Handle asyncGenObj, + HandleValue value); +MOZ_MUST_USE bool AsyncGeneratorYieldReturnAwaitedRejected( + JSContext* cx, Handle asyncGenObj, + HandleValue reason); + +bool AsyncGeneratorNext(JSContext* cx, unsigned argc, Value* vp); +bool AsyncGeneratorReturn(JSContext* cx, unsigned argc, Value* vp); +bool AsyncGeneratorThrow(JSContext* cx, unsigned argc, Value* vp); + +// AsyncGeneratorRequest record in the spec. +// Stores the info from AsyncGenerator#{next,return,throw}. +// +// This object is reused across multiple requests as an optimization, and +// stored in the Slot_CachedRequest slot. +class AsyncGeneratorRequest : public NativeObject { + private: + enum AsyncGeneratorRequestSlots { + // Int32 value with CompletionKind. + // Normal: next + // Return: return + // Throw: throw + Slot_CompletionKind = 0, + + // The value passed to AsyncGenerator#{next,return,throw}. + Slot_CompletionValue, + + // The promise returned by AsyncGenerator#{next,return,throw}. + Slot_Promise, + + Slots, + }; + + void init(CompletionKind completionKind, const Value& completionValue, + PromiseObject* promise) { + setFixedSlot(Slot_CompletionKind, + Int32Value(static_cast(completionKind))); + setFixedSlot(Slot_CompletionValue, completionValue); + setFixedSlot(Slot_Promise, ObjectValue(*promise)); + } + + // Clear the request data for reuse. 
+ void clearData() { + setFixedSlot(Slot_CompletionValue, NullValue()); + setFixedSlot(Slot_Promise, NullValue()); + } + + friend AsyncGeneratorObject; + + public: + static const JSClass class_; + + static AsyncGeneratorRequest* create(JSContext* cx, + CompletionKind completionKind, + HandleValue completionValue, + Handle promise); + + CompletionKind completionKind() const { + return static_cast( + getFixedSlot(Slot_CompletionKind).toInt32()); + } + JS::Value completionValue() const { + return getFixedSlot(Slot_CompletionValue); + } + PromiseObject* promise() const { + return &getFixedSlot(Slot_Promise).toObject().as(); + } +}; + +class AsyncGeneratorObject : public AbstractGeneratorObject { + private: + enum AsyncGeneratorObjectSlots { + // Int32 value containing one of the |State| fields from below. + Slot_State = AbstractGeneratorObject::RESERVED_SLOTS, + + // * null value if this async generator has no requests + // * AsyncGeneratorRequest if this async generator has only one request + // * list object if this async generator has 2 or more requests + Slot_QueueOrRequest, + + // Cached AsyncGeneratorRequest for later use. + // undefined if there's no cache. + Slot_CachedRequest, + + Slots + }; + + public: + enum State { + // "suspendedStart" in the spec. + // Suspended after invocation. + State_SuspendedStart, + + // "suspendedYield" in the spec + // Suspended with `yield` expression. + State_SuspendedYield, + + // "executing" in the spec. + // Resumed from initial suspend or yield, and either running the script + // or awaiting for `await` expression. + State_Executing, + + // Part of "executing" in the spec. + // Awaiting on the value passed by AsyncGenerator#return which is called + // while executing. + State_AwaitingYieldReturn, + + // "awaiting-return" in the spec. + // Awaiting on the value passed by AsyncGenerator#return which is called + // after completed. + State_AwaitingReturn, + + // "completed" in the spec. + // The generator is completed. 
+ State_Completed + }; + + State state() const { + return static_cast(getFixedSlot(Slot_State).toInt32()); + } + void setState(State state_) { setFixedSlot(Slot_State, Int32Value(state_)); } + + private: + // Queue is implemented in 2 ways. If only one request is queued ever, + // request is stored directly to the slot. Once 2 requests are queued, a + // list is created and requests are appended into it, and the list is + // stored to the slot. + + bool isSingleQueue() const { + return getFixedSlot(Slot_QueueOrRequest).isNull() || + getFixedSlot(Slot_QueueOrRequest) + .toObject() + .is(); + } + bool isSingleQueueEmpty() const { + return getFixedSlot(Slot_QueueOrRequest).isNull(); + } + void setSingleQueueRequest(AsyncGeneratorRequest* request) { + setFixedSlot(Slot_QueueOrRequest, ObjectValue(*request)); + } + void clearSingleQueueRequest() { + setFixedSlot(Slot_QueueOrRequest, NullValue()); + } + AsyncGeneratorRequest* singleQueueRequest() const { + return &getFixedSlot(Slot_QueueOrRequest) + .toObject() + .as(); + } + + ListObject* queue() const { + return &getFixedSlot(Slot_QueueOrRequest).toObject().as(); + } + void setQueue(ListObject* queue_) { + setFixedSlot(Slot_QueueOrRequest, ObjectValue(*queue_)); + } + + public: + static const JSClass class_; + static const JSClassOps classOps_; + + static AsyncGeneratorObject* create(JSContext* cx, HandleFunction asyncGen); + + bool isSuspendedStart() const { return state() == State_SuspendedStart; } + bool isSuspendedYield() const { return state() == State_SuspendedYield; } + bool isExecuting() const { return state() == State_Executing; } + bool isAwaitingYieldReturn() const { + return state() == State_AwaitingYieldReturn; + } + bool isAwaitingReturn() const { return state() == State_AwaitingReturn; } + bool isCompleted() const { return state() == State_Completed; } + + void setSuspendedStart() { setState(State_SuspendedStart); } + void setSuspendedYield() { setState(State_SuspendedYield); } + void setExecuting() { 
setState(State_Executing); } + void setAwaitingYieldReturn() { setState(State_AwaitingYieldReturn); } + void setAwaitingReturn() { setState(State_AwaitingReturn); } + void setCompleted() { setState(State_Completed); } + + static MOZ_MUST_USE bool enqueueRequest( + JSContext* cx, Handle asyncGenObj, + Handle request); + + static AsyncGeneratorRequest* dequeueRequest( + JSContext* cx, Handle asyncGenObj); + + static AsyncGeneratorRequest* peekRequest( + Handle asyncGenObj); + + bool isQueueEmpty() const { + if (isSingleQueue()) { + return isSingleQueueEmpty(); + } + return queue()->getDenseInitializedLength() == 0; + } + + // This function does either of the following: + // * return a cached request object with the slots updated + // * create a new request object with the slots set + static AsyncGeneratorRequest* createRequest( + JSContext* cx, Handle asyncGenObj, + CompletionKind completionKind, HandleValue completionValue, + Handle promise); + + // Stores the given request to the generator's cache after clearing its data + // slots. The cached request will be reused in the subsequent createRequest + // call. + void cacheRequest(AsyncGeneratorRequest* request) { + if (hasCachedRequest()) { + return; + } + + request->clearData(); + setFixedSlot(Slot_CachedRequest, ObjectValue(*request)); + } + + private: + bool hasCachedRequest() const { + return getFixedSlot(Slot_CachedRequest).isObject(); + } + + AsyncGeneratorRequest* takeCachedRequest() { + auto request = &getFixedSlot(Slot_CachedRequest) + .toObject() + .as(); + clearCachedRequest(); + return request; + } + + void clearCachedRequest() { setFixedSlot(Slot_CachedRequest, NullValue()); } +}; + +JSObject* CreateAsyncFromSyncIterator(JSContext* cx, HandleObject iter, + HandleValue nextMethod); + +class AsyncFromSyncIteratorObject : public NativeObject { + private: + enum AsyncFromSyncIteratorObjectSlots { + // Object that implements the sync iterator protocol. 
+ Slot_Iterator = 0, + + // The `next` property of the iterator object. + Slot_NextMethod = 1, + + Slots + }; + + void init(JSObject* iterator, const Value& nextMethod) { + setFixedSlot(Slot_Iterator, ObjectValue(*iterator)); + setFixedSlot(Slot_NextMethod, nextMethod); + } + + public: + static const JSClass class_; + + static JSObject* create(JSContext* cx, HandleObject iter, + HandleValue nextMethod); + + JSObject* iterator() const { return &getFixedSlot(Slot_Iterator).toObject(); } + + const Value& nextMethod() const { return getFixedSlot(Slot_NextMethod); } +}; + +MOZ_MUST_USE bool AsyncGeneratorResume( + JSContext* cx, Handle asyncGenObj, + CompletionKind completionKind, HandleValue argument); + +class AsyncIteratorObject : public NativeObject { + public: + static const JSClass class_; + static const JSClass protoClass_; +}; + +// Iterator Helpers proposal +class AsyncIteratorHelperObject : public NativeObject { + public: + static const JSClass class_; + + enum { GeneratorSlot, SlotCount }; + + static_assert(GeneratorSlot == ASYNC_ITERATOR_HELPER_GENERATOR_SLOT, + "GeneratorSlot must match self-hosting define for generator " + "object slot."); +}; + +AsyncIteratorHelperObject* NewAsyncIteratorHelper(JSContext* cx); + +} // namespace js + +#endif /* vm_AsyncIteration_h */ diff --git a/js/src/vm/AtomsTable.h b/js/src/vm/AtomsTable.h new file mode 100644 index 0000000000..06d637e113 --- /dev/null +++ b/js/src/vm/AtomsTable.h @@ -0,0 +1,222 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Implementation details of the atoms table. 
+ */ + +#ifndef vm_AtomsTable_h +#define vm_AtomsTable_h + +#include // std::{enable_if_t,is_const_v} + +#include "js/GCHashTable.h" +#include "js/TypeDecls.h" +#include "vm/JSAtom.h" + +/* + * The atoms table is a mapping from strings to JSAtoms that supports concurrent + * access and incremental sweeping. + * + * The table is partitioned based on the key into multiple sub-tables. Each + * sub-table is protected by a lock to ensure safety when accessed by helper + * threads. Concurrent access improves performance of off-thread parsing which + * frequently creates large numbers of atoms. Locking is only required when + * off-thread parsing is running. + */ + +namespace js { + +// Take all atoms table locks to allow iterating over cells in the atoms zone. +class MOZ_RAII AutoLockAllAtoms { + JSRuntime* runtime; + + public: + explicit AutoLockAllAtoms(JSRuntime* rt); + ~AutoLockAllAtoms(); +}; + +// This is a tagged pointer to an atom that duplicates the atom's pinned flag so +// that we don't have to check the atom itself when marking pinned atoms (there +// can be a great many atoms). See bug 1445196. +class AtomStateEntry { + uintptr_t bits; + + static const uintptr_t NO_TAG_MASK = uintptr_t(-1) - 1; + + public: + AtomStateEntry() : bits(0) {} + AtomStateEntry(const AtomStateEntry& other) = default; + AtomStateEntry(JSAtom* ptr, bool tagged) + : bits(uintptr_t(ptr) | uintptr_t(tagged)) { + MOZ_ASSERT((uintptr_t(ptr) & 0x1) == 0); + } + + bool isPinned() const { return bits & 0x1; } + + /* + * Non-branching code sequence. Note that the const_cast is safe because + * the hash function doesn't consider the tag to be a portion of the key. 
+ */ + void setPinned(bool pinned) const { + const_cast(this)->bits |= uintptr_t(pinned); + } + + JSAtom* asPtrUnbarriered() const { + MOZ_ASSERT(bits); + return reinterpret_cast(bits & NO_TAG_MASK); + } + + JSAtom* asPtr(JSContext* cx) const; + + bool needsSweep() { + JSAtom* atom = asPtrUnbarriered(); + return gc::IsAboutToBeFinalizedUnbarriered(&atom); + } +}; + +struct AtomHasher { + struct Lookup; + static inline HashNumber hash(const Lookup& l); + static MOZ_ALWAYS_INLINE bool match(const AtomStateEntry& entry, + const Lookup& lookup); + static void rekey(AtomStateEntry& k, const AtomStateEntry& newKey) { + k = newKey; + } +}; + +using AtomSet = JS::GCHashSet; + +// This class is a wrapper for AtomSet that is used to ensure the AtomSet is +// not modified. It should only expose read-only methods from AtomSet. +// Note however that the atoms within the table can be marked during GC. +class FrozenAtomSet { + AtomSet* mSet; + + public: + // This constructor takes ownership of the passed-in AtomSet. + explicit FrozenAtomSet(AtomSet* set) { mSet = set; } + + ~FrozenAtomSet() { js_delete(mSet); } + + MOZ_ALWAYS_INLINE AtomSet::Ptr readonlyThreadsafeLookup( + const AtomSet::Lookup& l) const; + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return mSet->shallowSizeOfIncludingThis(mallocSizeOf); + } + + using Range = AtomSet::Range; + + AtomSet::Range all() const { return mSet->all(); } +}; + +class AtomsTable { + static const size_t PartitionShift = 5; + static const size_t PartitionCount = 1 << PartitionShift; + + // Use a low initial capacity for atom hash tables to avoid penalizing + // runtimes which create a small number of atoms. + static const size_t InitialTableSize = 16; + + // A single partition, representing a subset of the atoms in the table. + struct Partition { + explicit Partition(uint32_t index); + ~Partition(); + + // Lock that must be held to access this set. + Mutex lock; + + // The atoms in this set. 
+ AtomSet atoms; + + // Set of atoms added while the |atoms| set is being swept. + AtomSet* atomsAddedWhileSweeping; + }; + + Partition* partitions[PartitionCount]; + +#ifdef DEBUG + bool allPartitionsLocked = false; +#endif + + public: + class AutoLock; + + // An iterator used for sweeping atoms incrementally. + class SweepIterator { + AtomsTable& atoms; + size_t partitionIndex; + mozilla::Maybe atomsIter; + + void settle(); + void startSweepingPartition(); + void finishSweepingPartition(); + + public: + explicit SweepIterator(AtomsTable& atoms); + bool empty() const; + AtomStateEntry front() const; + void removeFront(); + void popFront(); + }; + + ~AtomsTable(); + bool init(); + + template + MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyChars( + JSContext* cx, Chars chars, size_t length, PinningBehavior pin, + const mozilla::Maybe& indexValue, + const AtomHasher::Lookup& lookup); + + template >> + MOZ_ALWAYS_INLINE JSAtom* atomizeAndCopyChars( + JSContext* cx, CharT* chars, size_t length, PinningBehavior pin, + const mozilla::Maybe& indexValue, + const AtomHasher::Lookup& lookup) { + return atomizeAndCopyChars(cx, const_cast(chars), length, pin, + indexValue, lookup); + } + + bool atomIsPinned(JSRuntime* rt, JSAtom* atom); + + void maybePinExistingAtom(JSContext* cx, JSAtom* atom); + + void tracePinnedAtoms(JSTracer* trc, const AutoAccessAtomsZone& access); + + // Sweep all atoms non-incrementally. + void traceWeak(JSTracer* trc); + + bool startIncrementalSweep(); + + // Sweep some atoms incrementally and return whether we finished. + bool sweepIncrementally(SweepIterator& atomsToSweep, SliceBudget& budget); + +#ifdef DEBUG + bool mainThreadHasAllLocks() const { return allPartitionsLocked; } +#endif + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const; + + private: + // Map a key to a partition based on its hash. 
+ MOZ_ALWAYS_INLINE size_t getPartitionIndex(const AtomHasher::Lookup& lookup); + + void tracePinnedAtomsInSet(JSTracer* trc, AtomSet& atoms); + void mergeAtomsAddedWhileSweeping(Partition& partition); + + friend class AutoLockAllAtoms; + void lockAll(); + void unlockAll(); +}; + +bool AtomIsPinned(JSContext* cx, JSAtom* atom); + +} // namespace js + +#endif /* vm_AtomsTable_h */ diff --git a/js/src/vm/BigIntType.cpp b/js/src/vm/BigIntType.cpp new file mode 100644 index 0000000000..a22712c662 --- /dev/null +++ b/js/src/vm/BigIntType.cpp @@ -0,0 +1,3840 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * Portions of this code taken from WebKit, whose copyright is as follows: + * + * Copyright (C) 2017 Caio Lima + * Copyright (C) 2017-2018 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Portions of this code taken from V8, whose copyright notice is as follows: + * + * Copyright 2017 the V8 project authors. All rights reserved. + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of Google Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Portions of this code taken from Dart, whose copyright notice is as follows: + * + * Copyright (c) 2014 the Dart project authors. Please see the AUTHORS file + * [1] for details. All rights reserved. Use of this source code is governed by + * a BSD-style license that can be found in the LICENSE file [2]. + * + * [1] https://github.com/dart-lang/sdk/blob/master/AUTHORS + * [2] https://github.com/dart-lang/sdk/blob/master/LICENSE + * + * Portions of this code taken from Go, whose copyright notice is as follows: + * + * Copyright 2009 The Go Authors. All rights reserved. + * Use of this source code is governed by a BSD-style + * license that can be found in the LICENSE file [3]. 
+ * + * [3] https://golang.org/LICENSE + */ + +#include "vm/BigIntType.h" + +#include "mozilla/Casting.h" +#include "mozilla/FloatingPoint.h" +#include "mozilla/HashFunctions.h" +#include "mozilla/MathAlgorithms.h" +#include "mozilla/Maybe.h" +#include "mozilla/MemoryChecking.h" +#include "mozilla/Range.h" +#include "mozilla/RangedPtr.h" +#include "mozilla/Span.h" // mozilla::Span +#include "mozilla/WrappingOperations.h" + +#include +#include +#include +#include +#include // std::is_same_v + +#include "jsapi.h" +#include "jsnum.h" + +#include "builtin/BigInt.h" +#include "gc/Allocator.h" +#include "js/BigInt.h" +#include "js/Conversions.h" +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/Initialization.h" +#include "js/StableStringChars.h" +#include "js/Utility.h" +#include "util/CheckedArithmetic.h" +#include "vm/JSContext.h" +#include "vm/SelfHosting.h" + +#include "gc/FreeOp-inl.h" +#include "gc/Nursery-inl.h" +#include "vm/JSContext-inl.h" + +using namespace js; + +using JS::AutoStableStringChars; +using mozilla::Abs; +using mozilla::AssertedCast; +using mozilla::BitwiseCast; +using mozilla::IsFinite; +using mozilla::Maybe; +using mozilla::NegativeInfinity; +using mozilla::Nothing; +using mozilla::PositiveInfinity; +using mozilla::Range; +using mozilla::RangedPtr; +using mozilla::Some; +using mozilla::WrapToSigned; + +static inline unsigned DigitLeadingZeroes(BigInt::Digit x) { + return sizeof(x) == 4 ? 
mozilla::CountLeadingZeroes32(x) + : mozilla::CountLeadingZeroes64(x); +} + +#ifdef DEBUG +static bool HasLeadingZeroes(BigInt* bi) { + return bi->digitLength() > 0 && bi->digit(bi->digitLength() - 1) == 0; +} +#endif + +BigInt* BigInt::createUninitialized(JSContext* cx, size_t digitLength, + bool isNegative, gc::InitialHeap heap) { + if (digitLength > MaxDigitLength) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + + BigInt* x = AllocateBigInt(cx, heap); + if (!x) { + return nullptr; + } + + x->setLengthAndFlags(digitLength, isNegative ? SignBit : 0); + + MOZ_ASSERT(x->digitLength() == digitLength); + MOZ_ASSERT(x->isNegative() == isNegative); + + if (digitLength > InlineDigitsLength) { + x->heapDigits_ = js::AllocateBigIntDigits(cx, x, digitLength); + if (!x->heapDigits_) { + // |x| is partially initialized, expose it as a BigInt using inline digits + // to the GC. + x->setLengthAndFlags(0, 0); + return nullptr; + } + + AddCellMemory(x, digitLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + } + + return x; +} + +void BigInt::initializeDigitsToZero() { + auto digs = digits(); + std::uninitialized_fill_n(digs.begin(), digs.Length(), 0); +} + +void BigInt::finalize(JSFreeOp* fop) { + MOZ_ASSERT(isTenured()); + if (hasHeapDigits()) { + size_t size = digitLength() * sizeof(Digit); + fop->free_(this, heapDigits_, size, js::MemoryUse::BigIntDigits); + } +} + +js::HashNumber BigInt::hash() const { + js::HashNumber h = + mozilla::HashBytes(digits().data(), digitLength() * sizeof(Digit)); + return mozilla::AddToHash(h, isNegative()); +} + +size_t BigInt::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return hasInlineDigits() ? 
0 : mallocSizeOf(heapDigits_); +} + +size_t BigInt::sizeOfExcludingThisInNursery( + mozilla::MallocSizeOf mallocSizeOf) const { + MOZ_ASSERT(!isTenured()); + + if (hasInlineDigits()) { + return 0; + } + + const Nursery& nursery = runtimeFromMainThread()->gc.nursery(); + if (nursery.isInside(heapDigits_)) { + // See |AllocateBigIntDigits()|. + return RoundUp(digitLength() * sizeof(Digit), sizeof(Value)); + } + + return mallocSizeOf(heapDigits_); +} + +BigInt* BigInt::zero(JSContext* cx, gc::InitialHeap heap) { + return createUninitialized(cx, 0, false, heap); +} + +BigInt* BigInt::createFromDigit(JSContext* cx, Digit d, bool isNegative) { + MOZ_ASSERT(d != 0); + BigInt* res = createUninitialized(cx, 1, isNegative); + if (!res) { + return nullptr; + } + res->setDigit(0, d); + return res; +} + +BigInt* BigInt::one(JSContext* cx) { return createFromDigit(cx, 1, false); } + +BigInt* BigInt::negativeOne(JSContext* cx) { + return createFromDigit(cx, 1, true); +} + +BigInt* BigInt::createFromNonZeroRawUint64(JSContext* cx, uint64_t n, + bool isNegative) { + MOZ_ASSERT(n != 0); + + size_t resultLength = 1; + if (DigitBits == 32 && (n >> 32) != 0) { + resultLength = 2; + } + + BigInt* result = createUninitialized(cx, resultLength, isNegative); + if (!result) { + return nullptr; + } + result->setDigit(0, n); + if (DigitBits == 32 && resultLength > 1) { + result->setDigit(1, n >> 32); + } + + MOZ_ASSERT(!HasLeadingZeroes(result)); + return result; +} + +BigInt* BigInt::neg(JSContext* cx, HandleBigInt x) { + if (x->isZero()) { + return x; + } + + BigInt* result = copy(cx, x); + if (!result) { + return nullptr; + } + result->toggleHeaderFlagBit(SignBit); + return result; +} + +#if !defined(JS_64BIT) +# define HAVE_TWO_DIGIT 1 +using TwoDigit = uint64_t; +#elif defined(__SIZEOF_INT128__) +# define HAVE_TWO_DIGIT 1 +using TwoDigit = __uint128_t; +#endif + +inline BigInt::Digit BigInt::digitMul(Digit a, Digit b, Digit* high) { +#if defined(HAVE_TWO_DIGIT) + TwoDigit result = 
static_cast(a) * static_cast(b); + *high = result >> DigitBits; + + return static_cast(result); +#else + // Multiply in half-pointer-sized chunks. + // For inputs [AH AL]*[BH BL], the result is: + // + // [AL*BL] // rLow + // + [AL*BH] // rMid1 + // + [AH*BL] // rMid2 + // + [AH*BH] // rHigh + // = [R4 R3 R2 R1] // high = [R4 R3], low = [R2 R1] + // + // Where of course we must be careful with carries between the columns. + Digit aLow = a & HalfDigitMask; + Digit aHigh = a >> HalfDigitBits; + Digit bLow = b & HalfDigitMask; + Digit bHigh = b >> HalfDigitBits; + + Digit rLow = aLow * bLow; + Digit rMid1 = aLow * bHigh; + Digit rMid2 = aHigh * bLow; + Digit rHigh = aHigh * bHigh; + + Digit carry = 0; + Digit low = digitAdd(rLow, rMid1 << HalfDigitBits, &carry); + low = digitAdd(low, rMid2 << HalfDigitBits, &carry); + + *high = (rMid1 >> HalfDigitBits) + (rMid2 >> HalfDigitBits) + rHigh + carry; + + return low; +#endif +} + +BigInt::Digit BigInt::digitDiv(Digit high, Digit low, Digit divisor, + Digit* remainder) { + MOZ_ASSERT(high < divisor, "division must not overflow"); +#if defined(__x86_64__) + Digit quotient; + Digit rem; + __asm__("divq %[divisor]" + // Outputs: `quotient` will be in rax, `rem` in rdx. + : "=a"(quotient), "=d"(rem) + // Inputs: put `high` into rdx, `low` into rax, and `divisor` into + // any register or stack slot. + : "d"(high), "a"(low), [divisor] "rm"(divisor)); + *remainder = rem; + return quotient; +#elif defined(__i386__) + Digit quotient; + Digit rem; + __asm__("divl %[divisor]" + // Outputs: `quotient` will be in eax, `rem` in edx. + : "=a"(quotient), "=d"(rem) + // Inputs: put `high` into edx, `low` into eax, and `divisor` into + // any register or stack slot. + : "d"(high), "a"(low), [divisor] "rm"(divisor)); + *remainder = rem; + return quotient; +#else + static constexpr Digit HalfDigitBase = 1ull << HalfDigitBits; + // Adapted from Warren, Hacker's Delight, p. 152. 
+ unsigned s = DigitLeadingZeroes(divisor); + // If `s` is DigitBits here, it causes an undefined behavior. + // But `s` is never DigitBits since `divisor` is never zero here. + MOZ_ASSERT(s != DigitBits); + divisor <<= s; + + Digit vn1 = divisor >> HalfDigitBits; + Digit vn0 = divisor & HalfDigitMask; + + // `sZeroMask` which is 0 if s == 0 and all 1-bits otherwise. + // + // `s` can be 0. If `s` is 0, performing "low >> (DigitBits - s)" must not + // be done since it causes an undefined behavior since `>> DigitBits` is + // undefined in C++. Quoted from C++ spec, "The type of the result is that of + // the promoted left operand. + // + // The behavior is undefined if the right operand is negative, or greater + // than or equal to the length in bits of the promoted left operand". We + // mask the right operand of the shift by `shiftMask` (`DigitBits - 1`), + // which makes `DigitBits - 0` zero. + // + // This shifting produces a value which covers 0 < `s` <= (DigitBits - 1) + // cases. `s` == DigitBits never happen as we asserted. Since `sZeroMask` + // clears the value in the case of `s` == 0, `s` == 0 case is also covered. 
+ static_assert(sizeof(intptr_t) == sizeof(Digit), + "unexpected size of BigInt::Digit"); + Digit sZeroMask = + static_cast((-static_cast(s)) >> (DigitBits - 1)); + static constexpr unsigned shiftMask = DigitBits - 1; + Digit un32 = + (high << s) | ((low >> ((DigitBits - s) & shiftMask)) & sZeroMask); + + Digit un10 = low << s; + Digit un1 = un10 >> HalfDigitBits; + Digit un0 = un10 & HalfDigitMask; + Digit q1 = un32 / vn1; + Digit rhat = un32 - q1 * vn1; + + while (q1 >= HalfDigitBase || q1 * vn0 > rhat * HalfDigitBase + un1) { + q1--; + rhat += vn1; + if (rhat >= HalfDigitBase) { + break; + } + } + + Digit un21 = un32 * HalfDigitBase + un1 - q1 * divisor; + Digit q0 = un21 / vn1; + rhat = un21 - q0 * vn1; + + while (q0 >= HalfDigitBase || q0 * vn0 > rhat * HalfDigitBase + un0) { + q0--; + rhat += vn1; + if (rhat >= HalfDigitBase) { + break; + } + } + + *remainder = (un21 * HalfDigitBase + un0 - q0 * divisor) >> s; + return q1 * HalfDigitBase + q0; +#endif +} + +// Multiplies `source` with `factor` and adds `summand` to the result. +// `result` and `source` may be the same BigInt for inplace modification. +void BigInt::internalMultiplyAdd(BigInt* source, Digit factor, Digit summand, + unsigned n, BigInt* result) { + MOZ_ASSERT(source->digitLength() >= n); + MOZ_ASSERT(result->digitLength() >= n); + + Digit carry = summand; + Digit high = 0; + for (unsigned i = 0; i < n; i++) { + Digit current = source->digit(i); + Digit newCarry = 0; + + // Compute this round's multiplication. + Digit newHigh = 0; + current = digitMul(current, factor, &newHigh); + + // Add last round's carryovers. + current = digitAdd(current, high, &newCarry); + current = digitAdd(current, carry, &newCarry); + + // Store result and prepare for next round. + result->setDigit(i, current); + carry = newCarry; + high = newHigh; + } + + if (result->digitLength() > n) { + result->setDigit(n++, carry + high); + + // Current callers don't pass in such large results, but let's be robust. 
+ while (n < result->digitLength()) { + result->setDigit(n++, 0); + } + } else { + MOZ_ASSERT(!(carry + high)); + } +} + +// Multiplies `this` with `factor` and adds `summand` to the result. +void BigInt::inplaceMultiplyAdd(Digit factor, Digit summand) { + internalMultiplyAdd(this, factor, summand, digitLength(), this); +} + +// Multiplies `multiplicand` with `multiplier` and adds the result to +// `accumulator`, starting at `accumulatorIndex` for the least-significant +// digit. Callers must ensure that `accumulator`'s digitLength and +// corresponding digit storage is long enough to hold the result. +void BigInt::multiplyAccumulate(BigInt* multiplicand, Digit multiplier, + BigInt* accumulator, + unsigned accumulatorIndex) { + MOZ_ASSERT(accumulator->digitLength() > + multiplicand->digitLength() + accumulatorIndex); + if (!multiplier) { + return; + } + + Digit carry = 0; + Digit high = 0; + for (unsigned i = 0; i < multiplicand->digitLength(); + i++, accumulatorIndex++) { + Digit acc = accumulator->digit(accumulatorIndex); + Digit newCarry = 0; + + // Add last round's carryovers. + acc = digitAdd(acc, high, &newCarry); + acc = digitAdd(acc, carry, &newCarry); + + // Compute this round's multiplication. + Digit multiplicandDigit = multiplicand->digit(i); + Digit low = digitMul(multiplier, multiplicandDigit, &high); + acc = digitAdd(acc, low, &newCarry); + + // Store result and prepare for next round. 
+ accumulator->setDigit(accumulatorIndex, acc); + carry = newCarry; + } + + while (carry || high) { + MOZ_ASSERT(accumulatorIndex < accumulator->digitLength()); + Digit acc = accumulator->digit(accumulatorIndex); + Digit newCarry = 0; + acc = digitAdd(acc, high, &newCarry); + high = 0; + acc = digitAdd(acc, carry, &newCarry); + accumulator->setDigit(accumulatorIndex, acc); + carry = newCarry; + accumulatorIndex++; + } +} + +inline int8_t BigInt::absoluteCompare(BigInt* x, BigInt* y) { + MOZ_ASSERT(!HasLeadingZeroes(x)); + MOZ_ASSERT(!HasLeadingZeroes(y)); + + // Sanity checks to catch negative zeroes escaping to the wild. + MOZ_ASSERT(!x->isNegative() || !x->isZero()); + MOZ_ASSERT(!y->isNegative() || !y->isZero()); + + int diff = x->digitLength() - y->digitLength(); + if (diff) { + return diff < 0 ? -1 : 1; + } + + int i = x->digitLength() - 1; + while (i >= 0 && x->digit(i) == y->digit(i)) { + i--; + } + + if (i < 0) { + return 0; + } + + return x->digit(i) > y->digit(i) ? 1 : -1; +} + +BigInt* BigInt::absoluteAdd(JSContext* cx, HandleBigInt x, HandleBigInt y, + bool resultNegative) { + bool swap = x->digitLength() < y->digitLength(); + // Ensure `left` has at least as many digits as `right`. + HandleBigInt& left = swap ? y : x; + HandleBigInt& right = swap ? x : y; + + if (left->isZero()) { + MOZ_ASSERT(right->isZero()); + return left; + } + + if (right->isZero()) { + return resultNegative == left->isNegative() ? left : neg(cx, left); + } + + // Fast path for the likely-common case of up to a uint64_t of magnitude. 
+ if (left->absFitsInUint64()) { + MOZ_ASSERT(right->absFitsInUint64()); + + uint64_t lhs = left->uint64FromAbsNonZero(); + uint64_t rhs = right->uint64FromAbsNonZero(); + + uint64_t res = lhs + rhs; + bool overflow = res < lhs; + MOZ_ASSERT(res != 0 || overflow); + + size_t resultLength = 1; + if (DigitBits == 32) { + if (overflow) { + resultLength = 3; + } else if (res >> 32) { + resultLength = 2; + } + } else { + if (overflow) { + resultLength = 2; + } + } + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + result->setDigit(0, res); + if (DigitBits == 32 && resultLength > 1) { + result->setDigit(1, res >> 32); + } + if (overflow) { + constexpr size_t overflowIndex = DigitBits == 32 ? 2 : 1; + result->setDigit(overflowIndex, 1); + } + + MOZ_ASSERT(!HasLeadingZeroes(result)); + return result; + } + + BigInt* result = + createUninitialized(cx, left->digitLength() + 1, resultNegative); + if (!result) { + return nullptr; + } + Digit carry = 0; + unsigned i = 0; + for (; i < right->digitLength(); i++) { + Digit newCarry = 0; + Digit sum = digitAdd(left->digit(i), right->digit(i), &newCarry); + sum = digitAdd(sum, carry, &newCarry); + result->setDigit(i, sum); + carry = newCarry; + } + + for (; i < left->digitLength(); i++) { + Digit newCarry = 0; + Digit sum = digitAdd(left->digit(i), carry, &newCarry); + result->setDigit(i, sum); + carry = newCarry; + } + + result->setDigit(i, carry); + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::absoluteSub(JSContext* cx, HandleBigInt x, HandleBigInt y, + bool resultNegative) { + MOZ_ASSERT(x->digitLength() >= y->digitLength()); + MOZ_ASSERT(absoluteCompare(x, y) > 0); + MOZ_ASSERT(!x->isZero()); + + if (y->isZero()) { + return resultNegative == x->isNegative() ? x : neg(cx, x); + } + + // Fast path for the likely-common case of up to a uint64_t of magnitude. 
+ if (x->absFitsInUint64()) { + MOZ_ASSERT(y->absFitsInUint64()); + + uint64_t lhs = x->uint64FromAbsNonZero(); + uint64_t rhs = y->uint64FromAbsNonZero(); + MOZ_ASSERT(lhs > rhs); + + uint64_t res = lhs - rhs; + MOZ_ASSERT(res != 0); + + return createFromNonZeroRawUint64(cx, res, resultNegative); + } + + BigInt* result = createUninitialized(cx, x->digitLength(), resultNegative); + if (!result) { + return nullptr; + } + Digit borrow = 0; + unsigned i = 0; + for (; i < y->digitLength(); i++) { + Digit newBorrow = 0; + Digit difference = digitSub(x->digit(i), y->digit(i), &newBorrow); + difference = digitSub(difference, borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + + for (; i < x->digitLength(); i++) { + Digit newBorrow = 0; + Digit difference = digitSub(x->digit(i), borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + + MOZ_ASSERT(!borrow); + return destructivelyTrimHighZeroDigits(cx, result); +} + +// Divides `x` by `divisor`, returning the result in `quotient` and `remainder`. +// Mathematically, the contract is: +// +// quotient = (x - remainder) / divisor, with 0 <= remainder < divisor. +// +// If `quotient` is an empty handle, an appropriately sized BigInt will be +// allocated for it; otherwise the caller must ensure that it is big enough. +// `quotient` can be the same as `x` for an in-place division. `quotient` can +// also be `Nothing()` if the caller is only interested in the remainder. +// +// This function returns false if `quotient` is an empty handle, but allocating +// the quotient failed. Otherwise it returns true, indicating success. 
+bool BigInt::absoluteDivWithDigitDivisor( + JSContext* cx, HandleBigInt x, Digit divisor, + const Maybe& quotient, Digit* remainder, + bool quotientNegative) { + MOZ_ASSERT(divisor); + + MOZ_ASSERT(!x->isZero()); + *remainder = 0; + if (divisor == 1) { + if (quotient) { + BigInt* q; + if (x->isNegative() == quotientNegative) { + q = x; + } else { + q = neg(cx, x); + if (!q) { + return false; + } + } + quotient.value().set(q); + } + return true; + } + + unsigned length = x->digitLength(); + if (quotient) { + if (!quotient.value()) { + BigInt* q = createUninitialized(cx, length, quotientNegative); + if (!q) { + return false; + } + quotient.value().set(q); + } + + for (int i = length - 1; i >= 0; i--) { + Digit q = digitDiv(*remainder, x->digit(i), divisor, remainder); + quotient.value()->setDigit(i, q); + } + } else { + for (int i = length - 1; i >= 0; i--) { + digitDiv(*remainder, x->digit(i), divisor, remainder); + } + } + + return true; +} + +// Adds `summand` onto `this`, starting with `summand`'s 0th digit +// at `this`'s `startIndex`'th digit. Returns the "carry" (0 or 1). +BigInt::Digit BigInt::absoluteInplaceAdd(BigInt* summand, unsigned startIndex) { + Digit carry = 0; + unsigned n = summand->digitLength(); + MOZ_ASSERT(digitLength() > startIndex, + "must start adding at an in-range digit"); + MOZ_ASSERT(digitLength() - startIndex >= n, + "digits being added to must not extend above the digits in " + "this (except for the returned carry digit)"); + for (unsigned i = 0; i < n; i++) { + Digit newCarry = 0; + Digit sum = digitAdd(digit(startIndex + i), summand->digit(i), &newCarry); + sum = digitAdd(sum, carry, &newCarry); + setDigit(startIndex + i, sum); + carry = newCarry; + } + + return carry; +} + +// Subtracts `subtrahend` from this, starting with `subtrahend`'s 0th digit +// at `this`'s `startIndex`-th digit. Returns the "borrow" (0 or 1). 
+BigInt::Digit BigInt::absoluteInplaceSub(BigInt* subtrahend, + unsigned startIndex) { + Digit borrow = 0; + unsigned n = subtrahend->digitLength(); + MOZ_ASSERT(digitLength() > startIndex, + "must start subtracting from an in-range digit"); + MOZ_ASSERT(digitLength() - startIndex >= n, + "digits being subtracted from must not extend above the " + "digits in this (except for the returned borrow digit)"); + for (unsigned i = 0; i < n; i++) { + Digit newBorrow = 0; + Digit difference = + digitSub(digit(startIndex + i), subtrahend->digit(i), &newBorrow); + difference = digitSub(difference, borrow, &newBorrow); + setDigit(startIndex + i, difference); + borrow = newBorrow; + } + + return borrow; +} + +// Returns whether (factor1 * factor2) > (high << kDigitBits) + low. +inline bool BigInt::productGreaterThan(Digit factor1, Digit factor2, Digit high, + Digit low) { + Digit resultHigh; + Digit resultLow = digitMul(factor1, factor2, &resultHigh); + return resultHigh > high || (resultHigh == high && resultLow > low); +} + +void BigInt::inplaceRightShiftLowZeroBits(unsigned shift) { + MOZ_ASSERT(shift < DigitBits); + MOZ_ASSERT(!(digit(0) & ((static_cast(1) << shift) - 1)), + "should only be shifting away zeroes"); + + if (!shift) { + return; + } + + Digit carry = digit(0) >> shift; + unsigned last = digitLength() - 1; + for (unsigned i = 0; i < last; i++) { + Digit d = digit(i + 1); + setDigit(i, (d << (DigitBits - shift)) | carry); + carry = d >> shift; + } + setDigit(last, carry); +} + +// Always copies the input, even when `shift` == 0. +BigInt* BigInt::absoluteLeftShiftAlwaysCopy(JSContext* cx, HandleBigInt x, + unsigned shift, + LeftShiftMode mode) { + MOZ_ASSERT(shift < DigitBits); + MOZ_ASSERT(!x->isZero()); + + unsigned n = x->digitLength(); + unsigned resultLength = mode == LeftShiftMode::AlwaysAddOneDigit ? 
n + 1 : n; + BigInt* result = createUninitialized(cx, resultLength, x->isNegative()); + if (!result) { + return nullptr; + } + + if (!shift) { + for (unsigned i = 0; i < n; i++) { + result->setDigit(i, x->digit(i)); + } + if (mode == LeftShiftMode::AlwaysAddOneDigit) { + result->setDigit(n, 0); + } + + return result; + } + + Digit carry = 0; + for (unsigned i = 0; i < n; i++) { + Digit d = x->digit(i); + result->setDigit(i, (d << shift) | carry); + carry = d >> (DigitBits - shift); + } + + if (mode == LeftShiftMode::AlwaysAddOneDigit) { + result->setDigit(n, carry); + } else { + MOZ_ASSERT(mode == LeftShiftMode::SameSizeResult); + MOZ_ASSERT(!carry); + } + + return result; +} + +// Divides `dividend` by `divisor`, returning the result in `quotient` and +// `remainder`. Mathematically, the contract is: +// +// quotient = (dividend - remainder) / divisor, with 0 <= remainder < divisor. +// +// Both `quotient` and `remainder` are optional, for callers that are only +// interested in one of them. See Knuth, Volume 2, section 4.3.1, Algorithm D. +// Also see the overview of the algorithm by Jan Marthedal Rasmussen over at +// https://janmr.com/blog/2014/04/basic-multiple-precision-long-division/. +bool BigInt::absoluteDivWithBigIntDivisor( + JSContext* cx, HandleBigInt dividend, HandleBigInt divisor, + const Maybe& quotient, + const Maybe& remainder, bool isNegative) { + MOZ_ASSERT(divisor->digitLength() >= 2); + MOZ_ASSERT(dividend->digitLength() >= divisor->digitLength()); + + // Any early error return is detectable by checking the quotient and/or + // remainder output values. + MOZ_ASSERT(!quotient || !quotient.value()); + MOZ_ASSERT(!remainder || !remainder.value()); + + // The unusual variable names inside this function are consistent with + // Knuth's book, as well as with Go's implementation of this algorithm. + // Maintaining this consistency is probably more useful than trying to + // come up with more descriptive names for them. 
+ const unsigned n = divisor->digitLength(); + const unsigned m = dividend->digitLength() - n; + + // The quotient to be computed. + RootedBigInt q(cx); + if (quotient) { + q = createUninitialized(cx, m + 1, isNegative); + if (!q) { + return false; + } + } + + // In each iteration, `qhatv` holds `divisor` * `current quotient digit`. + // "v" is the book's name for `divisor`, `qhat` the current quotient digit. + RootedBigInt qhatv(cx, createUninitialized(cx, n + 1, isNegative)); + if (!qhatv) { + return false; + } + + // D1. + // Left-shift inputs so that the divisor's MSB is set. This is necessary to + // prevent the digit-wise divisions (see digitDiv call below) from + // overflowing (they take a two digits wide input, and return a one digit + // result). + Digit lastDigit = divisor->digit(n - 1); + unsigned shift = DigitLeadingZeroes(lastDigit); + + RootedBigInt shiftedDivisor(cx); + if (shift > 0) { + shiftedDivisor = absoluteLeftShiftAlwaysCopy(cx, divisor, shift, + LeftShiftMode::SameSizeResult); + if (!shiftedDivisor) { + return false; + } + } else { + shiftedDivisor = divisor; + } + + // Holds the (continuously updated) remaining part of the dividend, which + // eventually becomes the remainder. + RootedBigInt u(cx, + absoluteLeftShiftAlwaysCopy(cx, dividend, shift, + LeftShiftMode::AlwaysAddOneDigit)); + if (!u) { + return false; + } + + // D2. + // Iterate over the dividend's digit (like the "grade school" algorithm). + // `vn1` is the divisor's most significant digit. + Digit vn1 = shiftedDivisor->digit(n - 1); + for (int j = m; j >= 0; j--) { + // D3. + // Estimate the current iteration's quotient digit (see Knuth for details). + // `qhat` is the current quotient digit. + Digit qhat = std::numeric_limits::max(); + + // `ujn` is the dividend's most significant remaining digit. + Digit ujn = u->digit(j + n); + if (ujn != vn1) { + // `rhat` is the current iteration's remainder. 
+ Digit rhat = 0; + // Estimate the current quotient digit by dividing the most significant + // digits of dividend and divisor. The result will not be too small, + // but could be a bit too large. + qhat = digitDiv(ujn, u->digit(j + n - 1), vn1, &rhat); + + // Decrement the quotient estimate as needed by looking at the next + // digit, i.e. by testing whether + // qhat * v_{n-2} > (rhat << DigitBits) + u_{j+n-2}. + Digit vn2 = shiftedDivisor->digit(n - 2); + Digit ujn2 = u->digit(j + n - 2); + while (productGreaterThan(qhat, vn2, rhat, ujn2)) { + qhat--; + Digit prevRhat = rhat; + rhat += vn1; + // v[n-1] >= 0, so this tests for overflow. + if (rhat < prevRhat) { + break; + } + } + } + + // D4. + // Multiply the divisor with the current quotient digit, and subtract + // it from the dividend. If there was "borrow", then the quotient digit + // was one too high, so we must correct it and undo one subtraction of + // the (shifted) divisor. + internalMultiplyAdd(shiftedDivisor, qhat, 0, n, qhatv); + Digit c = u->absoluteInplaceSub(qhatv, j); + if (c) { + c = u->absoluteInplaceAdd(shiftedDivisor, j); + u->setDigit(j + n, u->digit(j + n) + c); + qhat--; + } + + if (quotient) { + q->setDigit(j, qhat); + } + } + + if (quotient) { + BigInt* bi = destructivelyTrimHighZeroDigits(cx, q); + if (!bi) { + return false; + } + quotient.value().set(q); + } + + if (remainder) { + u->inplaceRightShiftLowZeroBits(shift); + remainder.value().set(u); + } + + return true; +} + +// Helper for Absolute{And,AndNot,Or,Xor}. +// Performs the given binary `op` on digit pairs of `x` and `y`; when the +// end of the shorter of the two is reached, `kind` configures how +// remaining digits are handled. 
+// Example: +// y: [ y2 ][ y1 ][ y0 ] +// x: [ x3 ][ x2 ][ x1 ][ x0 ] +// | | | | +// (Fill) (op) (op) (op) +// | | | | +// v v v v +// result: [ 0 ][ x3 ][ r2 ][ r1 ][ r0 ] +template +inline BigInt* BigInt::absoluteBitwiseOp(JSContext* cx, HandleBigInt x, + HandleBigInt y, BitwiseOp&& op) { + unsigned xLength = x->digitLength(); + unsigned yLength = y->digitLength(); + unsigned numPairs = std::min(xLength, yLength); + unsigned resultLength; + if (kind == BitwiseOpKind::SymmetricTrim) { + resultLength = numPairs; + } else if (kind == BitwiseOpKind::SymmetricFill) { + resultLength = std::max(xLength, yLength); + } else { + MOZ_ASSERT(kind == BitwiseOpKind::AsymmetricFill); + resultLength = xLength; + } + bool resultNegative = false; + + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + + unsigned i = 0; + for (; i < numPairs; i++) { + result->setDigit(i, op(x->digit(i), y->digit(i))); + } + + if (kind != BitwiseOpKind::SymmetricTrim) { + BigInt* source = kind == BitwiseOpKind::AsymmetricFill ? x + : xLength == i ? 
y + : x; + for (; i < resultLength; i++) { + result->setDigit(i, source->digit(i)); + } + } + + MOZ_ASSERT(i == resultLength); + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::absoluteAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) { + return absoluteBitwiseOp(cx, x, y, + std::bit_and()); +} + +BigInt* BigInt::absoluteOr(JSContext* cx, HandleBigInt x, HandleBigInt y) { + return absoluteBitwiseOp(cx, x, y, + std::bit_or()); +} + +BigInt* BigInt::absoluteAndNot(JSContext* cx, HandleBigInt x, HandleBigInt y) { + auto digitOperation = [](Digit a, Digit b) { return a & ~b; }; + return absoluteBitwiseOp(cx, x, y, + digitOperation); +} + +BigInt* BigInt::absoluteXor(JSContext* cx, HandleBigInt x, HandleBigInt y) { + return absoluteBitwiseOp(cx, x, y, + std::bit_xor()); +} + +BigInt* BigInt::absoluteAddOne(JSContext* cx, HandleBigInt x, + bool resultNegative) { + unsigned inputLength = x->digitLength(); + // The addition will overflow into a new digit if all existing digits are + // at maximum. + bool willOverflow = true; + for (unsigned i = 0; i < inputLength; i++) { + if (std::numeric_limits::max() != x->digit(i)) { + willOverflow = false; + break; + } + } + + unsigned resultLength = inputLength + willOverflow; + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + + Digit carry = 1; + for (unsigned i = 0; i < inputLength; i++) { + Digit newCarry = 0; + result->setDigit(i, digitAdd(x->digit(i), carry, &newCarry)); + carry = newCarry; + } + if (resultLength > inputLength) { + MOZ_ASSERT(carry == 1); + result->setDigit(inputLength, 1); + } else { + MOZ_ASSERT(!carry); + } + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::absoluteSubOne(JSContext* cx, HandleBigInt x, + bool resultNegative) { + MOZ_ASSERT(!x->isZero()); + + unsigned length = x->digitLength(); + + if (length == 1) { + Digit d = x->digit(0); + if (d == 1) { + // Ignore resultNegative. 
+ return zero(cx); + } + return createFromDigit(cx, d - 1, resultNegative); + } + + BigInt* result = createUninitialized(cx, length, resultNegative); + if (!result) { + return nullptr; + } + + Digit borrow = 1; + for (unsigned i = 0; i < length; i++) { + Digit newBorrow = 0; + result->setDigit(i, digitSub(x->digit(i), borrow, &newBorrow)); + borrow = newBorrow; + } + MOZ_ASSERT(!borrow); + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::inc(JSContext* cx, HandleBigInt x) { + if (x->isZero()) { + return one(cx); + } + + bool isNegative = x->isNegative(); + if (isNegative) { + return absoluteSubOne(cx, x, isNegative); + } + + return absoluteAddOne(cx, x, isNegative); +} + +BigInt* BigInt::dec(JSContext* cx, HandleBigInt x) { + if (x->isZero()) { + return negativeOne(cx); + } + + bool isNegative = x->isNegative(); + if (isNegative) { + return absoluteAddOne(cx, x, isNegative); + } + + return absoluteSubOne(cx, x, isNegative); +} + +// Lookup table for the maximum number of bits required per character of a +// base-N string representation of a number. To increase accuracy, the array +// value is the actual value multiplied by 32. 
To generate this table: +// for (var i = 0; i <= 36; i++) { print(Math.ceil(Math.log2(i) * 32) + ","); } +static constexpr uint8_t maxBitsPerCharTable[] = { + 0, 0, 32, 51, 64, 75, 83, 90, 96, // 0..8 + 102, 107, 111, 115, 119, 122, 126, 128, // 9..16 + 131, 134, 136, 139, 141, 143, 145, 147, // 17..24 + 149, 151, 153, 154, 156, 158, 159, 160, // 25..32 + 162, 163, 165, 166, // 33..36 +}; + +static constexpr unsigned bitsPerCharTableShift = 5; +static constexpr size_t bitsPerCharTableMultiplier = 1u + << bitsPerCharTableShift; +static constexpr char radixDigits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; + +static inline uint64_t CeilDiv(uint64_t numerator, uint64_t denominator) { + MOZ_ASSERT(numerator != 0); + return 1 + (numerator - 1) / denominator; +}; + +// Compute (an overapproximation of) the length of the string representation of +// a BigInt. In base B an X-digit number has maximum value: +// +// B**X - 1 +// +// We're trying to find N for an N-digit number in base |radix| full +// representing a |bitLength|-digit number in base 2, so we have: +// +// radix**N - 1 ≥ 2**bitLength - 1 +// radix**N ≥ 2**bitLength +// N ≥ log2(2**bitLength) / log2(radix) +// N ≥ bitLength / log2(radix) +// +// so the smallest N is: +// +// N = ⌈bitLength / log2(radix)⌉ +// +// We want to avoid floating-point computations and precompute the logarithm, so +// we multiply both sides of the division by |bitsPerCharTableMultiplier|: +// +// N = ⌈(bPCTM * bitLength) / (bPCTM * log2(radix))⌉ +// +// and then because |maxBitsPerChar| representing the denominator may have been +// rounded *up* -- which could produce an overall under-computation -- we reduce +// by one to undo any rounding and conservatively compute: +// +// N ≥ ⌈(bPCTM * bitLength) / (maxBitsPerChar - 1)⌉ +// +size_t BigInt::calculateMaximumCharactersRequired(HandleBigInt x, + unsigned radix) { + MOZ_ASSERT(!x->isZero()); + MOZ_ASSERT(radix >= 2 && radix <= 36); + + size_t length = x->digitLength(); + Digit lastDigit 
= x->digit(length - 1); + size_t bitLength = length * DigitBits - DigitLeadingZeroes(lastDigit); + + uint8_t maxBitsPerChar = maxBitsPerCharTable[radix]; + uint64_t maximumCharactersRequired = + CeilDiv(static_cast(bitsPerCharTableMultiplier) * bitLength, + maxBitsPerChar - 1); + maximumCharactersRequired += x->isNegative(); + + return AssertedCast(maximumCharactersRequired); +} + +template +JSLinearString* BigInt::toStringBasePowerOfTwo(JSContext* cx, HandleBigInt x, + unsigned radix) { + MOZ_ASSERT(mozilla::IsPowerOfTwo(radix)); + MOZ_ASSERT(radix >= 2 && radix <= 32); + MOZ_ASSERT(!x->isZero()); + + const unsigned length = x->digitLength(); + const bool sign = x->isNegative(); + const unsigned bitsPerChar = mozilla::CountTrailingZeroes32(radix); + const unsigned charMask = radix - 1; + // Compute the length of the resulting string: divide the bit length of the + // BigInt by the number of bits representable per character (rounding up). + const Digit msd = x->digit(length - 1); + + const size_t bitLength = length * DigitBits - DigitLeadingZeroes(msd); + const size_t charsRequired = CeilDiv(bitLength, bitsPerChar) + sign; + + if (charsRequired > JSString::MAX_LENGTH) { + ReportOutOfMemory(cx); + return nullptr; + } + + auto resultChars = cx->make_pod_array(charsRequired); + if (!resultChars) { + return nullptr; + } + + Digit digit = 0; + // Keeps track of how many unprocessed bits there are in |digit|. + unsigned availableBits = 0; + size_t pos = charsRequired; + for (unsigned i = 0; i < length - 1; i++) { + Digit newDigit = x->digit(i); + // Take any leftover bits from the last iteration into account. 
+ unsigned current = (digit | (newDigit << availableBits)) & charMask; + MOZ_ASSERT(pos); + resultChars[--pos] = radixDigits[current]; + unsigned consumedBits = bitsPerChar - availableBits; + digit = newDigit >> consumedBits; + availableBits = DigitBits - consumedBits; + while (availableBits >= bitsPerChar) { + MOZ_ASSERT(pos); + resultChars[--pos] = radixDigits[digit & charMask]; + digit >>= bitsPerChar; + availableBits -= bitsPerChar; + } + } + + // Write out the character containing the lowest-order bit of |msd|. + // + // This character may include leftover bits from the Digit below |msd|. For + // example, if |x === 2n**64n| and |radix == 32|: the preceding loop writes + // twelve zeroes for low-order bits 0-59 in |x->digit(0)| (and |x->digit(1)| + // on 32-bit); then the highest 4 bits of of |x->digit(0)| (or |x->digit(1)| + // on 32-bit) and bit 0 of |x->digit(1)| (|x->digit(2)| on 32-bit) will + // comprise the |current == 0b1'0000| computed below for the high-order 'g' + // character. + unsigned current = (digit | (msd << availableBits)) & charMask; + MOZ_ASSERT(pos); + resultChars[--pos] = radixDigits[current]; + + // Write out remaining characters represented by |msd|. (There may be none, + // as in the example above.) + digit = msd >> (bitsPerChar - availableBits); + while (digit != 0) { + MOZ_ASSERT(pos); + resultChars[--pos] = radixDigits[digit & charMask]; + digit >>= bitsPerChar; + } + + if (sign) { + MOZ_ASSERT(pos); + resultChars[--pos] = '-'; + } + + MOZ_ASSERT(pos == 0); + return NewStringCopyN(cx, resultChars.get(), charsRequired); +} + +template +JSLinearString* BigInt::toStringSingleDigitBaseTen(JSContext* cx, Digit digit, + bool isNegative) { + if (digit <= Digit(INT32_MAX)) { + int32_t val = AssertedCast(digit); + return Int32ToString(cx, isNegative ? 
-val : val); + } + + MOZ_ASSERT(digit != 0, "zero case should have been handled in toString"); + + constexpr size_t maxLength = 1 + (std::numeric_limits::digits10 + 1); + static_assert(maxLength == 11 || maxLength == 21, + "unexpected decimal string length"); + + char resultChars[maxLength]; + size_t writePos = maxLength; + + while (digit != 0) { + MOZ_ASSERT(writePos > 0); + resultChars[--writePos] = radixDigits[digit % 10]; + digit /= 10; + } + MOZ_ASSERT(writePos < maxLength); + MOZ_ASSERT(resultChars[writePos] != '0'); + + if (isNegative) { + MOZ_ASSERT(writePos > 0); + resultChars[--writePos] = '-'; + } + + MOZ_ASSERT(writePos < maxLength); + return NewStringCopyN(cx, resultChars + writePos, + maxLength - writePos); +} + +static constexpr BigInt::Digit MaxPowerInDigit(uint8_t radix) { + BigInt::Digit result = 1; + while (result < BigInt::Digit(-1) / radix) { + result *= radix; + } + return result; +} + +static constexpr uint8_t MaxExponentInDigit(uint8_t radix) { + uint8_t exp = 0; + BigInt::Digit result = 1; + while (result < BigInt::Digit(-1) / radix) { + result *= radix; + exp += 1; + } + return exp; +} + +struct RadixInfo { + BigInt::Digit maxPowerInDigit; + uint8_t maxExponentInDigit; + + constexpr RadixInfo(BigInt::Digit maxPower, uint8_t maxExponent) + : maxPowerInDigit(maxPower), maxExponentInDigit(maxExponent) {} + + explicit constexpr RadixInfo(uint8_t radix) + : RadixInfo(MaxPowerInDigit(radix), MaxExponentInDigit(radix)) {} +}; + +static constexpr const RadixInfo toStringInfo[37] = { + {0, 0}, {0, 0}, RadixInfo(2), RadixInfo(3), RadixInfo(4), + RadixInfo(5), RadixInfo(6), RadixInfo(7), RadixInfo(8), RadixInfo(9), + RadixInfo(10), RadixInfo(11), RadixInfo(12), RadixInfo(13), RadixInfo(14), + RadixInfo(15), RadixInfo(16), RadixInfo(17), RadixInfo(18), RadixInfo(19), + RadixInfo(20), RadixInfo(21), RadixInfo(22), RadixInfo(23), RadixInfo(24), + RadixInfo(25), RadixInfo(26), RadixInfo(27), RadixInfo(28), RadixInfo(29), + RadixInfo(30), RadixInfo(31), 
RadixInfo(32), RadixInfo(33), RadixInfo(34), + RadixInfo(35), RadixInfo(36), +}; + +JSLinearString* BigInt::toStringGeneric(JSContext* cx, HandleBigInt x, + unsigned radix) { + MOZ_ASSERT(radix >= 2 && radix <= 36); + MOZ_ASSERT(!x->isZero()); + + size_t maximumCharactersRequired = + calculateMaximumCharactersRequired(x, radix); + if (maximumCharactersRequired > JSString::MAX_LENGTH) { + ReportOutOfMemory(cx); + return nullptr; + } + + UniqueChars resultString(js_pod_malloc(maximumCharactersRequired)); + if (!resultString) { + ReportOutOfMemory(cx); + return nullptr; + } + + size_t writePos = maximumCharactersRequired; + unsigned length = x->digitLength(); + Digit lastDigit; + if (length == 1) { + lastDigit = x->digit(0); + } else { + unsigned chunkChars = toStringInfo[radix].maxExponentInDigit; + Digit chunkDivisor = toStringInfo[radix].maxPowerInDigit; + + unsigned nonZeroDigit = length - 1; + MOZ_ASSERT(x->digit(nonZeroDigit) != 0); + + // `rest` holds the part of the BigInt that we haven't looked at yet. + // Not to be confused with "remainder"! + RootedBigInt rest(cx); + + // In the first round, divide the input, allocating a new BigInt for + // the result == rest; from then on divide the rest in-place. + // + // FIXME: absoluteDivWithDigitDivisor doesn't + // destructivelyTrimHighZeroDigits for in-place divisions, leading to + // worse constant factors. See + // https://bugzilla.mozilla.org/show_bug.cgi?id=1510213. 
+ RootedBigInt dividend(cx, x); + do { + Digit chunk; + if (!absoluteDivWithDigitDivisor(cx, dividend, chunkDivisor, Some(&rest), + &chunk, dividend->isNegative())) { + return nullptr; + } + + dividend = rest; + for (unsigned i = 0; i < chunkChars; i++) { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = radixDigits[chunk % radix]; + chunk /= radix; + } + MOZ_ASSERT(!chunk); + + if (!rest->digit(nonZeroDigit)) { + nonZeroDigit--; + } + + MOZ_ASSERT(rest->digit(nonZeroDigit) != 0, + "division by a single digit can't remove more than one " + "digit from a number"); + } while (nonZeroDigit > 0); + + lastDigit = rest->digit(0); + } + + do { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = radixDigits[lastDigit % radix]; + lastDigit /= radix; + } while (lastDigit > 0); + MOZ_ASSERT(writePos < maximumCharactersRequired); + MOZ_ASSERT(maximumCharactersRequired - writePos <= + static_cast(maximumCharactersRequired)); + + // Remove leading zeroes. + while (writePos + 1 < maximumCharactersRequired && + resultString[writePos] == '0') { + writePos++; + } + + if (x->isNegative()) { + MOZ_ASSERT(writePos > 0); + resultString[--writePos] = '-'; + } + + MOZ_ASSERT(writePos < maximumCharactersRequired); + // Would be better to somehow adopt resultString directly. 
+ return NewStringCopyN(cx, resultString.get() + writePos, + maximumCharactersRequired - writePos); +} + +static void FreeDigits(JSContext* cx, BigInt* bi, BigInt::Digit* digits, + size_t nbytes) { + if (cx->isHelperThreadContext()) { + js_free(digits); + } else if (bi->isTenured()) { + MOZ_ASSERT(!cx->nursery().isInside(digits)); + js_free(digits); + } else { + cx->nursery().freeBuffer(digits, nbytes); + } +} + +BigInt* BigInt::destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x) { + if (x->isZero()) { + MOZ_ASSERT(!x->isNegative()); + return x; + } + MOZ_ASSERT(x->digitLength()); + + int nonZeroIndex = x->digitLength() - 1; + while (nonZeroIndex >= 0 && x->digit(nonZeroIndex) == 0) { + nonZeroIndex--; + } + + if (nonZeroIndex < 0) { + return zero(cx); + } + + if (nonZeroIndex == static_cast(x->digitLength() - 1)) { + return x; + } + + unsigned newLength = nonZeroIndex + 1; + + if (newLength > InlineDigitsLength) { + MOZ_ASSERT(x->hasHeapDigits()); + + size_t oldLength = x->digitLength(); + Digit* newdigits = + js::ReallocateBigIntDigits(cx, x, x->heapDigits_, oldLength, newLength); + if (!newdigits) { + return nullptr; + } + x->heapDigits_ = newdigits; + + RemoveCellMemory(x, oldLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + AddCellMemory(x, newLength * sizeof(Digit), js::MemoryUse::BigIntDigits); + } else { + if (x->hasHeapDigits()) { + Digit digits[InlineDigitsLength]; + std::copy_n(x->heapDigits_, InlineDigitsLength, digits); + + size_t nbytes = x->digitLength() * sizeof(Digit); + FreeDigits(cx, x, x->heapDigits_, nbytes); + RemoveCellMemory(x, nbytes, js::MemoryUse::BigIntDigits); + + std::copy_n(digits, InlineDigitsLength, x->inlineDigits_); + } + } + + x->setLengthAndFlags(newLength, x->isNegative() ? 
SignBit : 0); + + return x; +} + +// The maximum value `radix**charCount - 1` must be represented as a max number +// `2**(N * DigitBits) - 1` for `N` digits, so +// +// 2**(N * DigitBits) - 1 ≥ radix**charcount - 1 +// 2**(N * DigitBits) ≥ radix**charcount +// N * DigitBits ≥ log2(radix**charcount) +// N * DigitBits ≥ charcount * log2(radix) +// N ≥ ⌈charcount * log2(radix) / DigitBits⌉ (conservatively) +// +// or in the code's terms (all numbers promoted to exact mathematical values), +// +// N ≥ ⌈charcount * bitsPerChar / (DigitBits * bitsPerCharTableMultiplier)⌉ +// +// Note that `N` is computed even more conservatively here because `bitsPerChar` +// is rounded up. +bool BigInt::calculateMaximumDigitsRequired(JSContext* cx, uint8_t radix, + size_t charcount, size_t* result) { + MOZ_ASSERT(2 <= radix && radix <= 36); + + uint8_t bitsPerChar = maxBitsPerCharTable[radix]; + + MOZ_ASSERT(charcount > 0); + MOZ_ASSERT(charcount <= std::numeric_limits::max() / bitsPerChar); + static_assert( + MaxDigitLength < std::numeric_limits::max(), + "can't safely cast calculateMaximumDigitsRequired result to size_t"); + + uint64_t n = CeilDiv(static_cast(charcount) * bitsPerChar, + DigitBits * bitsPerCharTableMultiplier); + if (n > MaxDigitLength) { + ReportOutOfMemory(cx); + return false; + } + + *result = n; + return true; +} + +template +BigInt* BigInt::parseLiteralDigits(JSContext* cx, + const Range chars, + unsigned radix, bool isNegative, + bool* haveParseError, gc::InitialHeap heap) { + static_assert( + std::is_same_v || std::is_same_v, + "only the bare minimum character types are supported, to avoid " + "excessively instantiating this template"); + + MOZ_ASSERT(chars.length()); + + RangedPtr start = chars.begin(); + RangedPtr end = chars.end(); + + // Skipping leading zeroes. 
+ while (start[0] == '0') { + start++; + if (start == end) { + return zero(cx, heap); + } + } + + unsigned limit0 = '0' + std::min(radix, 10u); + unsigned limita = 'a' + (radix - 10); + unsigned limitA = 'A' + (radix - 10); + + size_t length; + if (!calculateMaximumDigitsRequired(cx, radix, end - start, &length)) { + return nullptr; + } + BigInt* result = createUninitialized(cx, length, isNegative, heap); + if (!result) { + return nullptr; + } + + result->initializeDigitsToZero(); + + for (; start < end; start++) { + uint32_t digit; + CharT c = *start; + if (c >= '0' && c < limit0) { + digit = c - '0'; + } else if (c >= 'a' && c < limita) { + digit = c - 'a' + 10; + } else if (c >= 'A' && c < limitA) { + digit = c - 'A' + 10; + } else { + *haveParseError = true; + return nullptr; + } + + result->inplaceMultiplyAdd(static_cast(radix), + static_cast(digit)); + } + + return destructivelyTrimHighZeroDigits(cx, result); +} + +// BigInt proposal section 7.2 +template +BigInt* BigInt::parseLiteral(JSContext* cx, const Range chars, + bool* haveParseError) { + RangedPtr start = chars.begin(); + const RangedPtr end = chars.end(); + bool isNegative = false; + + MOZ_ASSERT(chars.length()); + + // This function is only called from the frontend when parsing BigInts. Parsed + // BigInts are stored in the script's data vector and therefore need to be + // allocated in the tenured heap. 
+ constexpr gc::InitialHeap heap = gc::TenuredHeap; + + if (end - start > 2 && start[0] == '0') { + if (start[1] == 'b' || start[1] == 'B') { + // StringNumericLiteral ::: BinaryIntegerLiteral + return parseLiteralDigits(cx, Range(start + 2, end), 2, + isNegative, haveParseError, heap); + } + if (start[1] == 'x' || start[1] == 'X') { + // StringNumericLiteral ::: HexIntegerLiteral + return parseLiteralDigits(cx, Range(start + 2, end), 16, + isNegative, haveParseError, heap); + } + if (start[1] == 'o' || start[1] == 'O') { + // StringNumericLiteral ::: OctalIntegerLiteral + return parseLiteralDigits(cx, Range(start + 2, end), 8, + isNegative, haveParseError, heap); + } + } + + return parseLiteralDigits(cx, Range(start, end), 10, isNegative, + haveParseError, heap); +} + +template +bool BigInt::literalIsZeroNoRadix(const Range chars) { + MOZ_ASSERT(chars.length()); + + RangedPtr start = chars.begin(); + RangedPtr end = chars.end(); + + // Skipping leading zeroes. + while (start[0] == '0') { + start++; + if (start == end) { + return true; + } + } + + return false; +} + +// trim and remove radix selection prefix. +template +bool BigInt::literalIsZero(const Range chars) { + RangedPtr start = chars.begin(); + const RangedPtr end = chars.end(); + + MOZ_ASSERT(chars.length()); + + // Skip over radix selector. 
+ if (end - start > 2 && start[0] == '0') { + if (start[1] == 'b' || start[1] == 'B' || start[1] == 'x' || + start[1] == 'X' || start[1] == 'o' || start[1] == 'O') { + return literalIsZeroNoRadix(Range(start + 2, end)); + } + } + + return literalIsZeroNoRadix(Range(start, end)); +} + +template bool BigInt::literalIsZero(const Range chars); + +BigInt* BigInt::createFromDouble(JSContext* cx, double d) { + MOZ_ASSERT(IsInteger(d), "Only integer-valued doubles can convert to BigInt"); + + if (d == 0) { + return zero(cx); + } + + int exponent = mozilla::ExponentComponent(d); + MOZ_ASSERT(exponent >= 0); + int length = exponent / DigitBits + 1; + BigInt* result = createUninitialized(cx, length, d < 0); + if (!result) { + return nullptr; + } + + // We construct a BigInt from the double `d` by shifting its mantissa + // according to its exponent and mapping the bit pattern onto digits. + // + // <----------- bitlength = exponent + 1 -----------> + // <----- 52 ------> <------ trailing zeroes ------> + // mantissa: 1yyyyyyyyyyyyyyyyy 0000000000000000000000000000000 + // digits: 0001xxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx xxxxxxxx + // <--> <------> + // msdTopBits DigitBits + // + using Double = mozilla::FloatingPoint; + uint64_t mantissa = + mozilla::BitwiseCast(d) & Double::kSignificandBits; + // Add implicit high bit. + mantissa |= 1ull << Double::kSignificandWidth; + + const int mantissaTopBit = Double::kSignificandWidth; // 0-indexed. + + // 0-indexed position of `d`'s most significant bit within the `msd`. + int msdTopBit = exponent % DigitBits; + + // Next digit under construction. + Digit digit; + + // First, build the MSD by shifting the mantissa appropriately. 
+ if (msdTopBit < mantissaTopBit) { + int remainingMantissaBits = mantissaTopBit - msdTopBit; + digit = mantissa >> remainingMantissaBits; + mantissa = mantissa << (64 - remainingMantissaBits); + } else { + MOZ_ASSERT(msdTopBit >= mantissaTopBit); + digit = mantissa << (msdTopBit - mantissaTopBit); + mantissa = 0; + } + MOZ_ASSERT(digit != 0, "most significant digit should not be zero"); + result->setDigit(--length, digit); + + // Fill in digits containing mantissa contributions. + while (mantissa) { + MOZ_ASSERT(length > 0, + "double bits were all non-fractional, so there must be " + "digits present to hold them"); + + if (DigitBits == 64) { + result->setDigit(--length, mantissa); + break; + } + + MOZ_ASSERT(DigitBits == 32); + Digit current = mantissa >> 32; + mantissa = mantissa << 32; + result->setDigit(--length, current); + } + + // Fill in low-order zeroes. + for (int i = length - 1; i >= 0; i--) { + result->setDigit(i, 0); + } + + return result; +} + +BigInt* BigInt::createFromUint64(JSContext* cx, uint64_t n) { + if (n == 0) { + return zero(cx); + } + + const bool isNegative = false; + + if (DigitBits == 32) { + Digit low = n; + Digit high = n >> 32; + size_t length = high ? 2 : 1; + + BigInt* res = createUninitialized(cx, length, isNegative); + if (!res) { + return nullptr; + } + res->setDigit(0, low); + if (high) { + res->setDigit(1, high); + } + return res; + } + + return createFromDigit(cx, n, isNegative); +} + +BigInt* BigInt::createFromInt64(JSContext* cx, int64_t n) { + BigInt* res = createFromUint64(cx, Abs(n)); + if (!res) { + return nullptr; + } + + if (n < 0) { + res->setHeaderFlagBit(SignBit); + } + MOZ_ASSERT(res->isNegative() == (n < 0)); + + return res; +} + +// BigInt proposal section 5.1.2 +BigInt* js::NumberToBigInt(JSContext* cx, double d) { + // Step 1 is an assertion checked by the caller. + // Step 2. 
+ if (!IsInteger(d)) { + char str[JS::MaximumNumberToStringLength]; + JS::NumberToString(d, str); + + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_NONINTEGER_NUMBER_TO_BIGINT, str); + return nullptr; + } + + // Step 3. + return BigInt::createFromDouble(cx, d); +} + +BigInt* BigInt::copy(JSContext* cx, HandleBigInt x, gc::InitialHeap heap) { + if (x->isZero()) { + return zero(cx, heap); + } + + BigInt* result = + createUninitialized(cx, x->digitLength(), x->isNegative(), heap); + if (!result) { + return nullptr; + } + for (size_t i = 0; i < x->digitLength(); i++) { + result->setDigit(i, x->digit(i)); + } + return result; +} + +// BigInt proposal section 1.1.7 +BigInt* BigInt::add(JSContext* cx, HandleBigInt x, HandleBigInt y) { + bool xNegative = x->isNegative(); + + // x + y == x + y + // -x + -y == -(x + y) + if (xNegative == y->isNegative()) { + return absoluteAdd(cx, x, y, xNegative); + } + + // x + -y == x - y == -(y - x) + // -x + y == y - x == -(x - y) + int8_t compare = absoluteCompare(x, y); + if (compare == 0) { + return zero(cx); + } + + if (compare > 0) { + return absoluteSub(cx, x, y, xNegative); + } + + return absoluteSub(cx, y, x, !xNegative); +} + +// BigInt proposal section 1.1.8 +BigInt* BigInt::sub(JSContext* cx, HandleBigInt x, HandleBigInt y) { + bool xNegative = x->isNegative(); + if (xNegative != y->isNegative()) { + // x - (-y) == x + y + // (-x) - y == -(x + y) + return absoluteAdd(cx, x, y, xNegative); + } + + // x - y == -(y - x) + // (-x) - (-y) == y - x == -(x - y) + int8_t compare = absoluteCompare(x, y); + if (compare == 0) { + return zero(cx); + } + + if (compare > 0) { + return absoluteSub(cx, x, y, xNegative); + } + + return absoluteSub(cx, y, x, !xNegative); +} + +// BigInt proposal section 1.1.4 +BigInt* BigInt::mul(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return x; + } + if (y->isZero()) { + return y; + } + + bool resultNegative = x->isNegative() != y->isNegative(); + + // Fast 
path for the likely-common case of up to a uint64_t of magnitude. + if (x->absFitsInUint64() && y->absFitsInUint64()) { + uint64_t lhs = x->uint64FromAbsNonZero(); + uint64_t rhs = y->uint64FromAbsNonZero(); + + uint64_t res; + if (js::SafeMul(lhs, rhs, &res)) { + MOZ_ASSERT(res != 0); + return createFromNonZeroRawUint64(cx, res, resultNegative); + } + } + + unsigned resultLength = x->digitLength() + y->digitLength(); + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + result->initializeDigitsToZero(); + + for (size_t i = 0; i < x->digitLength(); i++) { + multiplyAccumulate(y, x->digit(i), result, i); + } + + return destructivelyTrimHighZeroDigits(cx, result); +} + +// BigInt proposal section 1.1.5 +BigInt* BigInt::div(JSContext* cx, HandleBigInt x, HandleBigInt y) { + // 1. If y is 0n, throw a RangeError exception. + if (y->isZero()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_DIVISION_BY_ZERO); + return nullptr; + } + + // 2. Let quotient be the mathematical value of x divided by y. + // 3. Return a BigInt representing quotient rounded towards 0 to the next + // integral value. + if (x->isZero()) { + return x; + } + + if (absoluteCompare(x, y) < 0) { + return zero(cx); + } + + RootedBigInt quotient(cx); + bool resultNegative = x->isNegative() != y->isNegative(); + if (y->digitLength() == 1) { + Digit divisor = y->digit(0); + if (divisor == 1) { + return resultNegative == x->isNegative() ? x : neg(cx, x); + } + + Digit remainder; + if (!absoluteDivWithDigitDivisor(cx, x, divisor, Some("ient), + &remainder, resultNegative)) { + return nullptr; + } + } else { + if (!absoluteDivWithBigIntDivisor(cx, x, y, Some("ient), Nothing(), + resultNegative)) { + return nullptr; + } + } + + return destructivelyTrimHighZeroDigits(cx, quotient); +} + +// BigInt proposal section 1.1.6 +BigInt* BigInt::mod(JSContext* cx, HandleBigInt x, HandleBigInt y) { + // 1. 
If y is 0n, throw a RangeError exception. + if (y->isZero()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_DIVISION_BY_ZERO); + return nullptr; + } + + // 2. If x is 0n, return x. + if (x->isZero()) { + return x; + } + // 3. Let r be the BigInt defined by the mathematical relation r = x - (y × + // q) where q is a BigInt that is negative only if x/y is negative and + // positive only if x/y is positive, and whose magnitude is as large as + // possible without exceeding the magnitude of the true mathematical + // quotient of x and y. + if (absoluteCompare(x, y) < 0) { + return x; + } + + if (y->digitLength() == 1) { + Digit divisor = y->digit(0); + if (divisor == 1) { + return zero(cx); + } + + Digit remainderDigit; + bool unusedQuotientNegative = false; + if (!absoluteDivWithDigitDivisor(cx, x, divisor, Nothing(), &remainderDigit, + unusedQuotientNegative)) { + MOZ_CRASH("BigInt div by digit failed unexpectedly"); + } + + if (!remainderDigit) { + return zero(cx); + } + + return createFromDigit(cx, remainderDigit, x->isNegative()); + } else { + RootedBigInt remainder(cx); + if (!absoluteDivWithBigIntDivisor(cx, x, y, Nothing(), Some(&remainder), + x->isNegative())) { + return nullptr; + } + MOZ_ASSERT(remainder); + return destructivelyTrimHighZeroDigits(cx, remainder); + } +} + +// BigInt proposal section 1.1.3 +BigInt* BigInt::pow(JSContext* cx, HandleBigInt x, HandleBigInt y) { + // 1. If exponent is < 0, throw a RangeError exception. + if (y->isNegative()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_NEGATIVE_EXPONENT); + return nullptr; + } + + // 2. If base is 0n and exponent is 0n, return 1n. + if (y->isZero()) { + return one(cx); + } + + if (x->isZero()) { + return x; + } + + // 3. Return a BigInt representing the mathematical value of base raised + // to the power exponent. + if (x->digitLength() == 1 && x->digit(0) == 1) { + // (-1) ** even_number == 1. 
+ if (x->isNegative() && (y->digit(0) & 1) == 0) { + return neg(cx, x); + } + // (-1) ** odd_number == -1; 1 ** anything == 1. + return x; + } + + // For all bases >= 2, very large exponents would lead to unrepresentable + // results. + static_assert(MaxBitLength < std::numeric_limits::max(), + "unexpectedly large MaxBitLength"); + if (y->digitLength() > 1) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + Digit exponent = y->digit(0); + if (exponent == 1) { + return x; + } + if (exponent >= MaxBitLength) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + + static_assert(MaxBitLength <= std::numeric_limits::max(), + "unexpectedly large MaxBitLength"); + int n = static_cast(exponent); + bool isOddPower = n & 1; + + if (x->digitLength() == 1 && mozilla::IsPowerOfTwo(x->digit(0))) { + // Fast path for (2^m)^n. + + // Result is negative for odd powers. + bool resultNegative = x->isNegative() && isOddPower; + + unsigned m = mozilla::FloorLog2(x->digit(0)); + MOZ_ASSERT(m < DigitBits); + + static_assert(MaxBitLength * DigitBits > MaxBitLength, + "n * m can't overflow"); + n *= int(m); + + int length = 1 + (n / DigitBits); + BigInt* result = createUninitialized(cx, length, resultNegative); + if (!result) { + return nullptr; + } + result->initializeDigitsToZero(); + result->setDigit(length - 1, static_cast(1) << (n % DigitBits)); + return result; + } + + RootedBigInt runningSquare(cx, x); + RootedBigInt result(cx, isOddPower ? x : nullptr); + n /= 2; + + // Fast path for the likely-common case of up to a uint64_t of magnitude. + if (x->absFitsInUint64()) { + bool resultNegative = x->isNegative() && isOddPower; + + uint64_t runningSquareInt = x->uint64FromAbsNonZero(); + uint64_t resultInt = isOddPower ? 
runningSquareInt : 1; + while (true) { + uint64_t runningSquareStart = runningSquareInt; + uint64_t r; + if (!js::SafeMul(runningSquareInt, runningSquareInt, &r)) { + break; + } + runningSquareInt = r; + + if (n & 1) { + if (!js::SafeMul(resultInt, runningSquareInt, &r)) { + // Recover |runningSquare| before we restart the loop. + runningSquareInt = runningSquareStart; + break; + } + resultInt = r; + } + + n /= 2; + if (n == 0) { + return createFromNonZeroRawUint64(cx, resultInt, resultNegative); + } + } + + runningSquare = createFromNonZeroRawUint64(cx, runningSquareInt, false); + if (!runningSquare) { + return nullptr; + } + + result = createFromNonZeroRawUint64(cx, resultInt, resultNegative); + if (!result) { + return nullptr; + } + } + + // This implicitly sets the result's sign correctly. + while (true) { + runningSquare = mul(cx, runningSquare, runningSquare); + if (!runningSquare) { + return nullptr; + } + + if (n & 1) { + if (!result) { + result = runningSquare; + } else { + result = mul(cx, result, runningSquare); + if (!result) { + return nullptr; + } + } + } + + n /= 2; + if (n == 0) { + return result; + } + } +} + +BigInt* BigInt::lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero() || y->isZero()) { + return x; + } + + if (y->digitLength() > 1 || y->digit(0) > MaxBitLength) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + Digit shift = y->digit(0); + int digitShift = static_cast(shift / DigitBits); + int bitsShift = static_cast(shift % DigitBits); + int length = x->digitLength(); + bool grow = bitsShift && (x->digit(length - 1) >> (DigitBits - bitsShift)); + int resultLength = length + digitShift + grow; + BigInt* result = createUninitialized(cx, resultLength, x->isNegative()); + if (!result) { + return nullptr; + } + + int i = 0; + for (; i < digitShift; i++) { + result->setDigit(i, 0); + } + + if (bitsShift == 0) { + for (int j = 0; i < resultLength; i++, j++) { 
+ result->setDigit(i, x->digit(j)); + } + } else { + Digit carry = 0; + for (int j = 0; j < length; i++, j++) { + Digit d = x->digit(j); + result->setDigit(i, (d << bitsShift) | carry); + carry = d >> (DigitBits - bitsShift); + } + if (grow) { + result->setDigit(i, carry); + } else { + MOZ_ASSERT(!carry); + } + } + return result; +} + +BigInt* BigInt::rshByMaximum(JSContext* cx, bool isNegative) { + return isNegative ? negativeOne(cx) : zero(cx); +} + +BigInt* BigInt::rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero() || y->isZero()) { + return x; + } + + if (y->digitLength() > 1 || y->digit(0) >= MaxBitLength) { + return rshByMaximum(cx, x->isNegative()); + } + Digit shift = y->digit(0); + int length = x->digitLength(); + int digitShift = static_cast(shift / DigitBits); + int bitsShift = static_cast(shift % DigitBits); + int resultLength = length - digitShift; + if (resultLength <= 0) { + return rshByMaximum(cx, x->isNegative()); + } + // For negative numbers, round down if any bit was shifted out (so that e.g. + // -5n >> 1n == -3n and not -2n). Check now whether this will happen and + // whether it can cause overflow into a new digit. If we allocate the result + // large enough up front, it avoids having to do a second allocation later. + bool mustRoundDown = false; + if (x->isNegative()) { + const Digit mask = (static_cast(1) << bitsShift) - 1; + if ((x->digit(digitShift) & mask)) { + mustRoundDown = true; + } else { + for (int i = 0; i < digitShift; i++) { + if (x->digit(i)) { + mustRoundDown = true; + break; + } + } + } + } + // If bits_shift is non-zero, it frees up bits, preventing overflow. + if (mustRoundDown && bitsShift == 0) { + // Overflow cannot happen if the most significant digit has unset bits. 
+ Digit msd = x->digit(length - 1); + bool roundingCanOverflow = msd == std::numeric_limits::max(); + if (roundingCanOverflow) { + resultLength++; + } + } + + MOZ_ASSERT(resultLength <= length); + RootedBigInt result(cx, + createUninitialized(cx, resultLength, x->isNegative())); + if (!result) { + return nullptr; + } + if (!bitsShift) { + // If roundingCanOverflow, manually initialize the overflow digit. + result->setDigit(resultLength - 1, 0); + for (int i = digitShift; i < length; i++) { + result->setDigit(i - digitShift, x->digit(i)); + } + } else { + Digit carry = x->digit(digitShift) >> bitsShift; + int last = length - digitShift - 1; + for (int i = 0; i < last; i++) { + Digit d = x->digit(i + digitShift + 1); + result->setDigit(i, (d << (DigitBits - bitsShift)) | carry); + carry = d >> bitsShift; + } + result->setDigit(last, carry); + } + + if (mustRoundDown) { + MOZ_ASSERT(x->isNegative()); + // Since the result is negative, rounding down means adding one to + // its absolute value. This cannot overflow. TODO: modify the result in + // place. + return absoluteAddOne(cx, result, x->isNegative()); + } + return destructivelyTrimHighZeroDigits(cx, result); +} + +// BigInt proposal section 1.1.9. BigInt::leftShift ( x, y ) +BigInt* BigInt::lsh(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (y->isNegative()) { + return rshByAbsolute(cx, x, y); + } + return lshByAbsolute(cx, x, y); +} + +// BigInt proposal section 1.1.10. BigInt::signedRightShift ( x, y ) +BigInt* BigInt::rsh(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (y->isNegative()) { + return lshByAbsolute(cx, x, y); + } + return rshByAbsolute(cx, x, y); +} + +// BigInt proposal section 1.1.17. 
BigInt::bitwiseAND ( x, y ) +BigInt* BigInt::bitAnd(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return x; + } + + if (y->isZero()) { + return y; + } + + if (!x->isNegative() && !y->isNegative()) { + return absoluteAnd(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) & (-y) == ~(x-1) & ~(y-1) == ~((x-1) | (y-1)) + // == -(((x-1) | (y-1)) + 1) + RootedBigInt x1(cx, absoluteSubOne(cx, x)); + if (!x1) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + RootedBigInt result(cx, absoluteOr(cx, x1, y1)); + if (!result) { + return nullptr; + } + bool resultNegative = true; + return absoluteAddOne(cx, result, resultNegative); + } + + MOZ_ASSERT(x->isNegative() != y->isNegative()); + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? x : y; + + RootedBigInt neg1(cx, absoluteSubOne(cx, neg)); + if (!neg1) { + return nullptr; + } + + // x & (-y) == x & ~(y-1) == x & ~(y-1) + return absoluteAndNot(cx, pos, neg1); +} + +// BigInt proposal section 1.1.18. BigInt::bitwiseXOR ( x, y ) +BigInt* BigInt::bitXor(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return y; + } + + if (y->isZero()) { + return x; + } + + if (!x->isNegative() && !y->isNegative()) { + return absoluteXor(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) ^ (-y) == ~(x-1) ^ ~(y-1) == (x-1) ^ (y-1) + RootedBigInt x1(cx, absoluteSubOne(cx, x)); + if (!x1) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + return absoluteXor(cx, x1, y1); + } + MOZ_ASSERT(x->isNegative() != y->isNegative()); + + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? 
x : y; + + // x ^ (-y) == x ^ ~(y-1) == ~(x ^ (y-1)) == -((x ^ (y-1)) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, neg)); + if (!result) { + return nullptr; + } + result = absoluteXor(cx, result, pos); + if (!result) { + return nullptr; + } + bool resultNegative = true; + return absoluteAddOne(cx, result, resultNegative); +} + +// BigInt proposal section 1.1.19. BigInt::bitwiseOR ( x, y ) +BigInt* BigInt::bitOr(JSContext* cx, HandleBigInt x, HandleBigInt y) { + if (x->isZero()) { + return y; + } + + if (y->isZero()) { + return x; + } + + bool resultNegative = x->isNegative() || y->isNegative(); + + if (!resultNegative) { + return absoluteOr(cx, x, y); + } + + if (x->isNegative() && y->isNegative()) { + // (-x) | (-y) == ~(x-1) | ~(y-1) == ~((x-1) & (y-1)) + // == -(((x-1) & (y-1)) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, x)); + if (!result) { + return nullptr; + } + RootedBigInt y1(cx, absoluteSubOne(cx, y)); + if (!y1) { + return nullptr; + } + result = absoluteAnd(cx, result, y1); + if (!result) { + return nullptr; + } + return absoluteAddOne(cx, result, resultNegative); + } + + MOZ_ASSERT(x->isNegative() != y->isNegative()); + HandleBigInt& pos = x->isNegative() ? y : x; + HandleBigInt& neg = x->isNegative() ? x : y; + + // x | (-y) == x | ~(y-1) == ~((y-1) &~ x) == -(((y-1) &~ x) + 1) + RootedBigInt result(cx, absoluteSubOne(cx, neg)); + if (!result) { + return nullptr; + } + result = absoluteAndNot(cx, result, pos); + if (!result) { + return nullptr; + } + return absoluteAddOne(cx, result, resultNegative); +} + +// BigInt proposal section 1.1.2. 
BigInt::bitwiseNOT ( x ) +BigInt* BigInt::bitNot(JSContext* cx, HandleBigInt x) { + if (x->isNegative()) { + // ~(-x) == ~(~(x-1)) == x-1 + return absoluteSubOne(cx, x); + } else { + // ~x == -x-1 == -(x+1) + bool resultNegative = true; + return absoluteAddOne(cx, x, resultNegative); + } +} + +int64_t BigInt::toInt64(BigInt* x) { return WrapToSigned(toUint64(x)); } + +uint64_t BigInt::toUint64(BigInt* x) { + if (x->isZero()) { + return 0; + } + + uint64_t digit = x->uint64FromAbsNonZero(); + + // Return the two's complement if x is negative. + if (x->isNegative()) { + return ~(digit - 1); + } + + return digit; +} + +bool BigInt::isInt64(BigInt* x, int64_t* result) { + MOZ_MAKE_MEM_UNDEFINED(result, sizeof(*result)); + + if (!x->absFitsInUint64()) { + return false; + } + + if (x->isZero()) { + *result = 0; + return true; + } + + uint64_t magnitude = x->uint64FromAbsNonZero(); + + if (x->isNegative()) { + constexpr uint64_t Int64MinMagnitude = uint64_t(1) << 63; + if (magnitude <= Int64MinMagnitude) { + *result = magnitude == Int64MinMagnitude + ? std::numeric_limits::min() + : -AssertedCast(magnitude); + return true; + } + } else { + if (magnitude <= + static_cast(std::numeric_limits::max())) { + *result = AssertedCast(magnitude); + return true; + } + } + + return false; +} + +// Compute `2**bits - (x & (2**bits - 1))`. Used when treating BigInt values as +// arbitrary-precision two's complement signed integers. +BigInt* BigInt::truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x, + uint64_t bits, + bool resultNegative) { + MOZ_ASSERT(bits != 0); + MOZ_ASSERT(!x->isZero()); + + if (bits > MaxBitLength) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TOO_LARGE); + return nullptr; + } + + size_t resultLength = CeilDiv(bits, DigitBits); + BigInt* result = createUninitialized(cx, resultLength, resultNegative); + if (!result) { + return nullptr; + } + + // Process all digits except the MSD. 
+ size_t xLength = x->digitLength(); + Digit borrow = 0; + // Take digits from `x` until its length is exhausted. + for (size_t i = 0; i < std::min(resultLength - 1, xLength); i++) { + Digit newBorrow = 0; + Digit difference = digitSub(0, x->digit(i), &newBorrow); + difference = digitSub(difference, borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + // Then simulate leading zeroes in `x` as needed. + for (size_t i = xLength; i < resultLength - 1; i++) { + Digit newBorrow = 0; + Digit difference = digitSub(0, borrow, &newBorrow); + result->setDigit(i, difference); + borrow = newBorrow; + } + + // The MSD might contain extra bits that we don't want. + Digit xMSD = resultLength <= xLength ? x->digit(resultLength - 1) : 0; + Digit resultMSD; + if (bits % DigitBits == 0) { + Digit newBorrow = 0; + resultMSD = digitSub(0, xMSD, &newBorrow); + resultMSD = digitSub(resultMSD, borrow, &newBorrow); + } else { + size_t drop = DigitBits - (bits % DigitBits); + xMSD = (xMSD << drop) >> drop; + Digit minuendMSD = Digit(1) << (DigitBits - drop); + Digit newBorrow = 0; + resultMSD = digitSub(minuendMSD, xMSD, &newBorrow); + resultMSD = digitSub(resultMSD, borrow, &newBorrow); + MOZ_ASSERT(newBorrow == 0, "result < 2^bits"); + // If all subtracted bits were zero, we have to get rid of the + // materialized minuendMSD again. + resultMSD &= (minuendMSD - 1); + } + result->setDigit(resultLength - 1, resultMSD); + + return destructivelyTrimHighZeroDigits(cx, result); +} + +BigInt* BigInt::asUintN(JSContext* cx, HandleBigInt x, uint64_t bits) { + if (x->isZero()) { + return x; + } + + if (bits == 0) { + return zero(cx); + } + + // When truncating a negative number, simulate two's complement. 
+ if (x->isNegative()) { + bool resultNegative = false; + return truncateAndSubFromPowerOfTwo(cx, x, bits, resultNegative); + } + + if (bits <= 64) { + uint64_t u64 = toUint64(x); + uint64_t mask = uint64_t(-1) >> (64 - bits); + uint64_t n = u64 & mask; + if (u64 == n && x->absFitsInUint64()) { + return x; + } + return createFromUint64(cx, n); + } + + if (bits >= MaxBitLength) { + return x; + } + + Digit msd = x->digit(x->digitLength() - 1); + size_t msdBits = DigitBits - DigitLeadingZeroes(msd); + size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits; + + if (bits >= bitLength) { + return x; + } + + size_t length = CeilDiv(bits, DigitBits); + MOZ_ASSERT(length >= 2, "single-digit cases should be handled above"); + MOZ_ASSERT(length <= x->digitLength()); + + // Eagerly trim high zero digits. + const size_t highDigitBits = ((bits - 1) % DigitBits) + 1; + const Digit highDigitMask = Digit(-1) >> (DigitBits - highDigitBits); + Digit mask = highDigitMask; + while (length > 0) { + if (x->digit(length - 1) & mask) { + break; + } + + mask = Digit(-1); + length--; + } + + const bool isNegative = false; + BigInt* res = createUninitialized(cx, length, isNegative); + if (res == nullptr) { + return nullptr; + } + + while (length-- > 0) { + res->setDigit(length, x->digit(length) & mask); + mask = Digit(-1); + } + MOZ_ASSERT_IF(length == 0, res->isZero()); + + return res; +} + +BigInt* BigInt::asIntN(JSContext* cx, HandleBigInt x, uint64_t bits) { + if (x->isZero()) { + return x; + } + + if (bits == 0) { + return zero(cx); + } + + if (bits == 64) { + int64_t n = toInt64(x); + if (((n < 0) == x->isNegative()) && x->absFitsInUint64()) { + return x; + } + return createFromInt64(cx, n); + } + + if (bits > MaxBitLength) { + return x; + } + + Digit msd = x->digit(x->digitLength() - 1); + size_t msdBits = DigitBits - DigitLeadingZeroes(msd); + size_t bitLength = msdBits + (x->digitLength() - 1) * DigitBits; + + if (bits > bitLength) { + return x; + } + + Digit signBit = 
Digit(1) << ((bits - 1) % DigitBits); + if (bits == bitLength && msd < signBit) { + return x; + } + + // All the cases above were the trivial cases: truncating zero, or to zero + // bits, or to more bits than are in `x` (so we return `x` directly), or we + // already have the 64-bit fast path. If we get here, follow the textbook + // algorithm from the specification. + + // BigInt.asIntN step 3: Let `mod` be `x` modulo `2**bits`. + RootedBigInt mod(cx, asUintN(cx, x, bits)); + if (!mod) { + return nullptr; + } + + // Step 4: If `mod >= 2**(bits - 1)`, return `mod - 2**bits`; otherwise, + // return `mod`. + if (mod->digitLength() == CeilDiv(bits, DigitBits)) { + MOZ_ASSERT(!mod->isZero(), + "nonzero bits implies nonzero digit length which implies " + "nonzero overall"); + + if ((mod->digit(mod->digitLength() - 1) & signBit) != 0) { + bool resultNegative = true; + return truncateAndSubFromPowerOfTwo(cx, mod, bits, resultNegative); + } + } + + return mod; +} + +static bool ValidBigIntOperands(JSContext* cx, HandleValue lhs, + HandleValue rhs) { + MOZ_ASSERT(lhs.isBigInt() || rhs.isBigInt()); + + if (!lhs.isBigInt() || !rhs.isBigInt()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_TO_NUMBER); + return false; + } + + return true; +} + +bool BigInt::addValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::add(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::subValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::sub(cx, lhsBigInt, 
rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::mulValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::mul(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::divValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::div(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::modValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::mod(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::powValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::pow(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::negValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::neg(cx, operandBigInt); + if (!resBigInt) { + return false; + } 
+ res.setBigInt(resBigInt); + return true; +} + +bool BigInt::incValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::inc(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::decValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::dec(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::lshValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::lsh(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::rshValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::rsh(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitAndValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::bitAnd(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitXorValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + 
MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::bitXor(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitOrValue(JSContext* cx, HandleValue lhs, HandleValue rhs, + MutableHandleValue res) { + if (!ValidBigIntOperands(cx, lhs, rhs)) { + return false; + } + + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + BigInt* resBigInt = BigInt::bitOr(cx, lhsBigInt, rhsBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +bool BigInt::bitNotValue(JSContext* cx, HandleValue operand, + MutableHandleValue res) { + MOZ_ASSERT(operand.isBigInt()); + + RootedBigInt operandBigInt(cx, operand.toBigInt()); + BigInt* resBigInt = BigInt::bitNot(cx, operandBigInt); + if (!resBigInt) { + return false; + } + res.setBigInt(resBigInt); + return true; +} + +// BigInt proposal section 7.3 +BigInt* js::ToBigInt(JSContext* cx, HandleValue val) { + RootedValue v(cx, val); + + // Step 1. + if (!ToPrimitive(cx, JSTYPE_NUMBER, &v)) { + return nullptr; + } + + // Step 2. + if (v.isBigInt()) { + return v.toBigInt(); + } + + if (v.isBoolean()) { + return v.toBoolean() ? 
BigInt::one(cx) : BigInt::zero(cx); + } + + if (v.isString()) { + RootedString str(cx, v.toString()); + BigInt* bi; + JS_TRY_VAR_OR_RETURN_NULL(cx, bi, StringToBigInt(cx, str)); + if (!bi) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + return nullptr; + } + return bi; + } + + ReportValueError(cx, JSMSG_CANT_CONVERT_TO, JSDVG_IGNORE_STACK, v, nullptr, + "BigInt"); + return nullptr; +} + +JS::Result js::ToBigInt64(JSContext* cx, HandleValue v) { + BigInt* bi = js::ToBigInt(cx, v); + if (!bi) { + return cx->alreadyReportedError(); + } + return BigInt::toInt64(bi); +} + +JS::Result js::ToBigUint64(JSContext* cx, HandleValue v) { + BigInt* bi = js::ToBigInt(cx, v); + if (!bi) { + return cx->alreadyReportedError(); + } + return BigInt::toUint64(bi); +} + +double BigInt::numberValue(BigInt* x) { + if (x->isZero()) { + return 0.0; + } + + using Double = mozilla::FloatingPoint; + constexpr uint8_t ExponentShift = Double::kExponentShift; + constexpr uint8_t SignificandWidth = Double::kSignificandWidth; + constexpr unsigned ExponentBias = Double::kExponentBias; + constexpr uint8_t SignShift = Double::kExponentWidth + SignificandWidth; + + MOZ_ASSERT(x->digitLength() > 0); + + // Fast path for the likely-common case of up to a uint64_t of magnitude not + // exceeding integral precision in IEEE-754. (Note that we *depend* on this + // optimization being performed further down.) + if (x->absFitsInUint64()) { + uint64_t magnitude = x->uint64FromAbsNonZero(); + const uint64_t MaxIntegralPrecisionDouble = uint64_t(1) + << (SignificandWidth + 1); + if (magnitude <= MaxIntegralPrecisionDouble) { + return x->isNegative() ? -double(magnitude) : +double(magnitude); + } + } + + size_t length = x->digitLength(); + Digit msd = x->digit(length - 1); + uint8_t msdLeadingZeroes = DigitLeadingZeroes(msd); + + // `2**ExponentBias` is the largest power of two in a finite IEEE-754 + // double. 
If this bigint has a greater power of two, it'll round to + // infinity. + uint64_t exponent = length * DigitBits - msdLeadingZeroes - 1; + if (exponent > ExponentBias) { + return x->isNegative() ? mozilla::NegativeInfinity() + : mozilla::PositiveInfinity(); + } + + // Otherwise munge the most significant bits of the number into proper + // position in an IEEE-754 double and go to town. + + // Omit the most significant bit: the IEEE-754 format includes this bit + // implicitly for all double-precision integers. + const uint8_t msdIgnoredBits = msdLeadingZeroes + 1; + const uint8_t msdIncludedBits = DigitBits - msdIgnoredBits; + + // We compute the final mantissa of the result, shifted upward to the top of + // the `uint64_t` space -- plus an extra bit to detect potential rounding. + constexpr uint8_t BitsNeededForShiftedMantissa = SignificandWidth + 1; + + // Shift `msd`'s contributed bits upward to remove high-order zeroes and the + // highest set bit (which is implicit in IEEE-754 integral values so must be + // removed) and to add low-order zeroes. (Lower-order garbage bits are + // discarded when `shiftedMantissa` is converted to a real mantissa.) + uint64_t shiftedMantissa = + msdIncludedBits == 0 ? 0 : uint64_t(msd) << (64 - msdIncludedBits); + + // If the extra bit is set, correctly rounding the result may require + // examining all lower-order bits. Also compute 1) the index of the Digit + // storing the extra bit, and 2) whether bits beneath the extra bit in that + // Digit are nonzero so we can round if needed. + size_t digitContainingExtraBit; + Digit bitsBeneathExtraBitInDigitContainingExtraBit; + + // Add shifted bits to `shiftedMantissa` until we have a complete mantissa and + // an extra bit. 
+ if (msdIncludedBits >= BitsNeededForShiftedMantissa) { + // DigitBits=64 (necessarily for msdIncludedBits ≥ SignificandWidth+1; + // | C++ compiler range analysis ought eliminate this + // | check on 32-bit) + // _________|__________ + // / | + // msdIncludedBits + // ________|________ + // / | + // [001···················| + // \_/\_____________/\__| + // | | | + // msdIgnoredBits | bits below the extra bit (may be no bits) + // BitsNeededForShiftedMantissa=SignificandWidth+1 + digitContainingExtraBit = length - 1; + + const uint8_t countOfBitsInDigitBelowExtraBit = + DigitBits - BitsNeededForShiftedMantissa - msdIgnoredBits; + bitsBeneathExtraBitInDigitContainingExtraBit = + msd & ((Digit(1) << countOfBitsInDigitBelowExtraBit) - 1); + } else { + MOZ_ASSERT(length >= 2, + "single-Digit numbers with this few bits should have been " + "handled by the fast-path above"); + + Digit second = x->digit(length - 2); + if (DigitBits == 64) { + shiftedMantissa |= second >> msdIncludedBits; + + digitContainingExtraBit = length - 2; + + // msdIncludedBits + DigitBits + // ________|_________ + // / | + // DigitBits=64 + // msdIncludedBits | + // __|___ _____|___ + // / \ / | + // [001········|···········| + // \_/\_____________/\___| + // | | | + // msdIgnoredBits | bits below the extra bit (always more than one) + // | + // BitsNeededForShiftedMantissa=SignificandWidth+1 + const uint8_t countOfBitsInSecondDigitBelowExtraBit = + (msdIncludedBits + DigitBits) - BitsNeededForShiftedMantissa; + + bitsBeneathExtraBitInDigitContainingExtraBit = + second << (DigitBits - countOfBitsInSecondDigitBelowExtraBit); + } else { + shiftedMantissa |= uint64_t(second) << msdIgnoredBits; + + if (msdIncludedBits + DigitBits >= BitsNeededForShiftedMantissa) { + digitContainingExtraBit = length - 2; + + // msdIncludedBits + DigitBits + // ______|________ + // / | + // DigitBits=32 + // msdIncludedBits | + // _|_ _____|___ + // / \ / | + // [001·····|···········| + // \___________/\__| + // | | + 
// | bits below the extra bit (may be no bits) + // BitsNeededForShiftedMantissa=SignificandWidth+1 + const uint8_t countOfBitsInSecondDigitBelowExtraBit = + (msdIncludedBits + DigitBits) - BitsNeededForShiftedMantissa; + + bitsBeneathExtraBitInDigitContainingExtraBit = + second & ((Digit(1) << countOfBitsInSecondDigitBelowExtraBit) - 1); + } else { + MOZ_ASSERT(length >= 3, + "we must have at least three digits here, because " + "`msdIncludedBits + 32 < BitsNeededForShiftedMantissa` " + "guarantees `x < 2**53` -- and therefore the " + "MaxIntegralPrecisionDouble optimization above will have " + "handled two-digit cases"); + + Digit third = x->digit(length - 3); + shiftedMantissa |= uint64_t(third) >> msdIncludedBits; + + digitContainingExtraBit = length - 3; + + // msdIncludedBits + DigitBits + DigitBits + // ____________|______________ + // / | + // DigitBits=32 + // msdIncludedBits | DigitBits=32 + // _|_ _____|___ ____|____ + // / \ / \ / | + // [001·····|···········|···········| + // \____________________/\_____| + // | | + // | bits below the extra bit + // BitsNeededForShiftedMantissa=SignificandWidth+1 + static_assert(2 * DigitBits > BitsNeededForShiftedMantissa, + "two 32-bit digits should more than fill a mantissa"); + const uint8_t countOfBitsInThirdDigitBelowExtraBit = + msdIncludedBits + 2 * DigitBits - BitsNeededForShiftedMantissa; + + // Shift out the mantissa bits and the extra bit. + bitsBeneathExtraBitInDigitContainingExtraBit = + third << (DigitBits - countOfBitsInThirdDigitBelowExtraBit); + } + } + } + + constexpr uint64_t LeastSignificantBit = uint64_t(1) + << (64 - SignificandWidth); + constexpr uint64_t ExtraBit = LeastSignificantBit >> 1; + + // The extra bit must be set for rounding to change the mantissa. + if ((shiftedMantissa & ExtraBit) != 0) { + bool shouldRoundUp; + if (shiftedMantissa & LeastSignificantBit) { + // If the lowest mantissa bit is set, it doesn't matter what lower bits + // are: nearest-even rounds up regardless. 
+ shouldRoundUp = true; + } else { + // If the lowest mantissa bit is unset, *all* lower bits are relevant. + // All-zero bits below the extra bit situates `x` halfway between two + // values, and the nearest *even* value lies downward. But if any bit + // below the extra bit is set, `x` is closer to the rounded-up value. + shouldRoundUp = bitsBeneathExtraBitInDigitContainingExtraBit != 0; + if (!shouldRoundUp) { + while (digitContainingExtraBit-- > 0) { + if (x->digit(digitContainingExtraBit) != 0) { + shouldRoundUp = true; + break; + } + } + } + } + + if (shouldRoundUp) { + // Add one to the significand bits. If they overflow, the exponent must + // also be increased. If *that* overflows, return the correct infinity. + uint64_t before = shiftedMantissa; + shiftedMantissa += ExtraBit; + if (shiftedMantissa < before) { + exponent++; + if (exponent > ExponentBias) { + return x->isNegative() ? NegativeInfinity() + : PositiveInfinity(); + } + } + } + } + + uint64_t significandBits = shiftedMantissa >> (64 - SignificandWidth); + uint64_t signBit = uint64_t(x->isNegative() ? 1 : 0) << SignShift; + uint64_t exponentBits = (exponent + ExponentBias) << ExponentShift; + return mozilla::BitwiseCast(signBit | exponentBits | significandBits); +} + +int8_t BigInt::compare(BigInt* x, BigInt* y) { + // Sanity checks to catch negative zeroes escaping to the wild. + MOZ_ASSERT(!x->isNegative() || !x->isZero()); + MOZ_ASSERT(!y->isNegative() || !y->isZero()); + + bool xSign = x->isNegative(); + + if (xSign != y->isNegative()) { + return xSign ? 
-1 : 1; + } + + if (xSign) { + std::swap(x, y); + } + + return absoluteCompare(x, y); +} + +bool BigInt::equal(BigInt* lhs, BigInt* rhs) { + if (lhs == rhs) { + return true; + } + if (lhs->digitLength() != rhs->digitLength()) { + return false; + } + if (lhs->isNegative() != rhs->isNegative()) { + return false; + } + for (size_t i = 0; i < lhs->digitLength(); i++) { + if (lhs->digit(i) != rhs->digit(i)) { + return false; + } + } + return true; +} + +int8_t BigInt::compare(BigInt* x, double y) { + MOZ_ASSERT(!mozilla::IsNaN(y)); + + constexpr int LessThan = -1, Equal = 0, GreaterThan = 1; + + // ±Infinity exceeds a finite bigint value. + if (!mozilla::IsFinite(y)) { + return y > 0 ? LessThan : GreaterThan; + } + + // Handle `x === 0n` and `y == 0` special cases. + if (x->isZero()) { + if (y == 0) { + // -0 and +0 are treated identically. + return Equal; + } + + return y > 0 ? LessThan : GreaterThan; + } + + const bool xNegative = x->isNegative(); + if (y == 0) { + return xNegative ? LessThan : GreaterThan; + } + + // Nonzero `x` and `y` with different signs are trivially compared. + const bool yNegative = y < 0; + if (xNegative != yNegative) { + return xNegative ? LessThan : GreaterThan; + } + + // `x` and `y` are same-signed. Determine which has greater magnitude, + // then combine that with the signedness just computed to reach a result. + const int exponent = mozilla::ExponentComponent(y); + if (exponent < 0) { + // `y` is a nonzero fraction of magnitude less than 1. + return xNegative ? LessThan : GreaterThan; + } + + size_t xLength = x->digitLength(); + MOZ_ASSERT(xLength > 0); + + Digit xMSD = x->digit(xLength - 1); + const int shift = DigitLeadingZeroes(xMSD); + int xBitLength = xLength * DigitBits - shift; + + // Differing bit-length makes for a simple comparison. + int yBitLength = exponent + 1; + if (xBitLength < yBitLength) { + return xNegative ? GreaterThan : LessThan; + } + if (xBitLength > yBitLength) { + return xNegative ? 
 LessThan : GreaterThan; + } + + // Compare the high 64 bits of both numbers. (Lower-order bits not present + // in either number are zeroed.) Either that distinguishes `x` and `y`, or + // `x` and `y` differ only if a subsequent nonzero bit in `x` means `x` has + // larger magnitude. + + using Double = mozilla::FloatingPoint<double>; + constexpr uint8_t SignificandWidth = Double::kSignificandWidth; + constexpr uint64_t SignificandBits = Double::kSignificandBits; + + const uint64_t doubleBits = mozilla::BitwiseCast<uint64_t>(y); + const uint64_t significandBits = doubleBits & SignificandBits; + + // Re-add the implicit-one bit when constructing `y`'s high 64 bits. + const uint64_t yHigh64Bits = + ((uint64_t(1) << SignificandWidth) | significandBits) + << (64 - SignificandWidth - 1); + + // Cons up `x`'s high 64 bits, backfilling zeroes for binary fractions of 1 + // if `x` doesn't have 64 bits. + uint8_t xBitsFilled = DigitBits - shift; + uint64_t xHigh64Bits = uint64_t(xMSD) << (64 - xBitsFilled); + + // At this point we no longer need to look at the most significant digit. + xLength--; + + // The high 64 bits from `x` will probably not align to a digit boundary. + // `xHasNonZeroLeftoverBits` will be set to true if any remaining + // least-significant bit from the digit holding xHigh64Bits's + // least-significant bit is nonzero. 
+ bool xHasNonZeroLeftoverBits = false; + + if (xBitsFilled < std::min(xBitLength, 64)) { + MOZ_ASSERT(xLength >= 1, + "If there are more bits to fill, there should be " + "more digits to fill them from"); + + Digit second = x->digit(--xLength); + if (DigitBits == 32) { + xBitsFilled += 32; + xHigh64Bits |= uint64_t(second) << (64 - xBitsFilled); + if (xBitsFilled < 64 && xLength >= 1) { + Digit third = x->digit(--xLength); + const uint8_t neededBits = 64 - xBitsFilled; + xHigh64Bits |= uint64_t(third) >> (DigitBits - neededBits); + xHasNonZeroLeftoverBits = (third << neededBits) != 0; + } + } else { + const uint8_t neededBits = 64 - xBitsFilled; + xHigh64Bits |= uint64_t(second) >> (DigitBits - neededBits); + xHasNonZeroLeftoverBits = (second << neededBits) != 0; + } + } + + // If high bits are unequal, the larger one has greater magnitude. + if (yHigh64Bits > xHigh64Bits) { + return xNegative ? GreaterThan : LessThan; + } + if (xHigh64Bits > yHigh64Bits) { + return xNegative ? LessThan : GreaterThan; + } + + // Otherwise the top 64 bits of both are equal. If the values differ, a + // lower-order bit in `x` is nonzero and `x` has greater magnitude than + // `y`; otherwise `x == y`. + if (xHasNonZeroLeftoverBits) { + return xNegative ? LessThan : GreaterThan; + } + while (xLength != 0) { + if (x->digit(--xLength) != 0) { + return xNegative ? LessThan : GreaterThan; + } + } + + return Equal; +} + +bool BigInt::equal(BigInt* lhs, double rhs) { + if (mozilla::IsNaN(rhs)) { + return false; + } + return compare(lhs, rhs) == 0; +} + +JS::Result BigInt::equal(JSContext* cx, Handle lhs, + HandleString rhs) { + BigInt* rhsBigInt; + MOZ_TRY_VAR(rhsBigInt, StringToBigInt(cx, rhs)); + if (!rhsBigInt) { + return false; + } + return equal(lhs, rhsBigInt); +} + +// BigInt proposal section 3.2.5 +JS::Result BigInt::looselyEqual(JSContext* cx, HandleBigInt lhs, + HandleValue rhs) { + // Step 1. 
+ if (rhs.isBigInt()) { + return equal(lhs, rhs.toBigInt()); + } + + // Steps 2-5 (not applicable). + + // Steps 6-7. + if (rhs.isString()) { + RootedString rhsString(cx, rhs.toString()); + return equal(cx, lhs, rhsString); + } + + // Steps 8-9 (not applicable). + + // Steps 10-11. + if (rhs.isObject()) { + RootedValue rhsPrimitive(cx, rhs); + if (!ToPrimitive(cx, &rhsPrimitive)) { + return cx->alreadyReportedError(); + } + return looselyEqual(cx, lhs, rhsPrimitive); + } + + // Step 12. + if (rhs.isNumber()) { + return equal(lhs, rhs.toNumber()); + } + + // Step 13. + return false; +} + +// BigInt proposal section 1.1.12. BigInt::lessThan ( x, y ) +bool BigInt::lessThan(BigInt* x, BigInt* y) { return compare(x, y) < 0; } + +Maybe BigInt::lessThan(BigInt* lhs, double rhs) { + if (mozilla::IsNaN(rhs)) { + return Maybe(Nothing()); + } + return Some(compare(lhs, rhs) < 0); +} + +Maybe BigInt::lessThan(double lhs, BigInt* rhs) { + if (mozilla::IsNaN(lhs)) { + return Maybe(Nothing()); + } + return Some(-compare(rhs, lhs) < 0); +} + +bool BigInt::lessThan(JSContext* cx, HandleBigInt lhs, HandleString rhs, + Maybe& res) { + BigInt* rhsBigInt; + JS_TRY_VAR_OR_RETURN_FALSE(cx, rhsBigInt, StringToBigInt(cx, rhs)); + if (!rhsBigInt) { + res = Nothing(); + return true; + } + res = Some(lessThan(lhs, rhsBigInt)); + return true; +} + +bool BigInt::lessThan(JSContext* cx, HandleString lhs, HandleBigInt rhs, + Maybe& res) { + BigInt* lhsBigInt; + JS_TRY_VAR_OR_RETURN_FALSE(cx, lhsBigInt, StringToBigInt(cx, lhs)); + if (!lhsBigInt) { + res = Nothing(); + return true; + } + res = Some(lessThan(lhsBigInt, rhs)); + return true; +} + +bool BigInt::lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs, + Maybe& res) { + if (lhs.isBigInt()) { + if (rhs.isString()) { + RootedBigInt lhsBigInt(cx, lhs.toBigInt()); + RootedString rhsString(cx, rhs.toString()); + return lessThan(cx, lhsBigInt, rhsString, res); + } + + if (rhs.isNumber()) { + res = lessThan(lhs.toBigInt(), rhs.toNumber()); + 
return true; + } + + MOZ_ASSERT(rhs.isBigInt()); + res = Some(lessThan(lhs.toBigInt(), rhs.toBigInt())); + return true; + } + + MOZ_ASSERT(rhs.isBigInt()); + if (lhs.isString()) { + RootedString lhsString(cx, lhs.toString()); + RootedBigInt rhsBigInt(cx, rhs.toBigInt()); + return lessThan(cx, lhsString, rhsBigInt, res); + } + + MOZ_ASSERT(lhs.isNumber()); + res = lessThan(lhs.toNumber(), rhs.toBigInt()); + return true; +} + +template +JSLinearString* BigInt::toString(JSContext* cx, HandleBigInt x, uint8_t radix) { + MOZ_ASSERT(2 <= radix && radix <= 36); + + if (x->isZero()) { + return cx->staticStrings().getInt(0); + } + + if (mozilla::IsPowerOfTwo(radix)) { + return toStringBasePowerOfTwo(cx, x, radix); + } + + if (radix == 10 && x->digitLength() == 1) { + return toStringSingleDigitBaseTen(cx, x->digit(0), + x->isNegative()); + } + + // Punt on doing generic toString without GC. + if (!allowGC) { + return nullptr; + } + + return toStringGeneric(cx, x, radix); +} + +template JSLinearString* BigInt::toString(JSContext* cx, + HandleBigInt x, + uint8_t radix); +template JSLinearString* BigInt::toString(JSContext* cx, + HandleBigInt x, + uint8_t radix); + +template +static inline BigInt* ParseStringBigIntLiteral(JSContext* cx, + Range range, + bool* haveParseError) { + auto start = range.begin(); + auto end = range.end(); + + while (start < end && unicode::IsSpace(start[0])) { + start++; + } + + while (start < end && unicode::IsSpace(end[-1])) { + end--; + } + + if (start == end) { + return BigInt::zero(cx); + } + + // StringNumericLiteral ::: StrDecimalLiteral, but without Infinity, decimal + // points, or exponents. Note that the raw '+' or '-' cases fall through + // because the string is too short, and eventually signal a parse error. 
 + if (end - start > 1) { + if (start[0] == '+') { + bool isNegative = false; + start++; + return BigInt::parseLiteralDigits(cx, Range<const CharT>(start, end), 10, + isNegative, haveParseError); + } + if (start[0] == '-') { + bool isNegative = true; + start++; + return BigInt::parseLiteralDigits(cx, Range<const CharT>(start, end), 10, + isNegative, haveParseError); + } + } + + return BigInt::parseLiteral(cx, Range<const CharT>(start, end), + haveParseError); + } + + // Called from BigInt constructor. + JS::Result<BigInt*, JS::OOM&> js::StringToBigInt(JSContext* cx, + HandleString str) { + JSLinearString* linear = str->ensureLinear(cx); + if (!linear) { + return cx->alreadyReportedOOM(); + } + + AutoStableStringChars chars(cx); + if (!chars.init(cx, str)) { + return cx->alreadyReportedOOM(); + } + + BigInt* res; + bool parseError = false; + if (chars.isLatin1()) { + res = ParseStringBigIntLiteral(cx, chars.latin1Range(), &parseError); + } else { + res = ParseStringBigIntLiteral(cx, chars.twoByteRange(), &parseError); + } + + // A nullptr result can indicate either a parse error or out-of-memory. + if (!res && !parseError) { + return cx->alreadyReportedOOM(); + } + + return res; + } + + // Called from parser with already trimmed and validated token. + BigInt* js::ParseBigIntLiteral(JSContext* cx, + const Range<const char16_t>& chars) { + bool parseError = false; + BigInt* res = BigInt::parseLiteral(cx, chars, &parseError); + if (!res) { + return nullptr; + } + MOZ_ASSERT(res->isTenured()); + MOZ_RELEASE_ASSERT(!parseError); + return res; + } + + // Check an already validated numeric literal for a non-zero value. Used by + // the parser's node folder in deferred mode. 
+bool js::BigIntLiteralIsZero(const mozilla::Range& chars) { + return BigInt::literalIsZero(chars); +} + +template +JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi) { + JSString* str = BigInt::toString(cx, bi, 10); + if (!str) { + return nullptr; + } + return AtomizeString(cx, str); +} + +template JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi); +template JSAtom* js::BigIntToAtom(JSContext* cx, HandleBigInt bi); + +#if defined(DEBUG) || defined(JS_JITSPEW) +void BigInt::dump() const { + js::Fprinter out(stderr); + dump(out); +} + +void BigInt::dump(js::GenericPrinter& out) const { + if (isNegative()) { + out.putChar('-'); + } + + if (digitLength() == 0) { + out.put("0"); + } else if (digitLength() == 1) { + uint64_t d = digit(0); + out.printf("%" PRIu64, d); + } else { + out.put("0x"); + for (size_t i = 0; i < digitLength(); i++) { + uint64_t d = digit(digitLength() - i - 1); + if (sizeof(Digit) == 4) { + out.printf("%.8" PRIX32, uint32_t(d)); + } else { + out.printf("%.16" PRIX64, d); + } + } + } + + out.putChar('n'); +} +#endif + +JS::ubi::Node::Size JS::ubi::Concrete::size( + mozilla::MallocSizeOf mallocSizeOf) const { + BigInt& bi = get(); + size_t size = sizeof(JS::BigInt); + if (IsInsideNursery(&bi)) { + size += Nursery::nurseryCellHeaderSize(); + size += bi.sizeOfExcludingThisInNursery(mallocSizeOf); + } else { + size += bi.sizeOfExcludingThis(mallocSizeOf); + } + return size; +} + +template +XDRResult js::XDRBigInt(XDRState* xdr, MutableHandleBigInt bi) { + JSContext* cx = xdr->cx(); + + uint8_t sign; + uint32_t length; + + if (mode == XDR_ENCODE) { + cx->check(bi); + sign = static_cast(bi->isNegative()); + uint64_t sz = bi->digitLength() * sizeof(BigInt::Digit); + // As the maximum source code size is currently UINT32_MAX code units + // (see BytecodeCompiler::checkLength), any bigint literal's length in + // word-sized digits will be less than UINT32_MAX as well. 
That could + // change or FoldConstants could start creating these though, so leave + // this as a release-enabled assert. + MOZ_RELEASE_ASSERT(sz <= UINT32_MAX); + length = static_cast(sz); + } + + MOZ_TRY(xdr->codeUint8(&sign)); + MOZ_TRY(xdr->codeUint32(&length)); + + MOZ_RELEASE_ASSERT(length % sizeof(BigInt::Digit) == 0); + uint32_t digitLength = length / sizeof(BigInt::Digit); + auto buf = cx->make_pod_array(digitLength); + if (!buf) { + return xdr->fail(JS::TranscodeResult_Throw); + } + + if (mode == XDR_ENCODE) { + std::uninitialized_copy_n(bi->digits().Elements(), digitLength, buf.get()); + } + + MOZ_TRY(xdr->codeBytes(buf.get(), length)); + + if (mode == XDR_DECODE) { + BigInt* res = + BigInt::createUninitialized(cx, digitLength, sign, gc::TenuredHeap); + if (!res) { + return xdr->fail(JS::TranscodeResult_Throw); + } + std::uninitialized_copy_n(buf.get(), digitLength, res->digits().Elements()); + bi.set(res); + } + + return Ok(); +} + +template XDRResult js::XDRBigInt(XDRState* xdr, + MutableHandleBigInt bi); + +template XDRResult js::XDRBigInt(XDRState* xdr, + MutableHandleBigInt bi); + +// Public API + +BigInt* JS::NumberToBigInt(JSContext* cx, double num) { + return js::NumberToBigInt(cx, num); +} + +template +static inline BigInt* StringToBigIntHelper(JSContext* cx, + Range& chars) { + bool parseError = false; + BigInt* bi = ParseStringBigIntLiteral(cx, chars, &parseError); + if (!bi) { + if (parseError) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + } + return nullptr; + } + MOZ_RELEASE_ASSERT(!parseError); + return bi; +} + +BigInt* JS::StringToBigInt(JSContext* cx, Range chars) { + return StringToBigIntHelper(cx, chars); +} + +BigInt* JS::StringToBigInt(JSContext* cx, Range chars) { + return StringToBigIntHelper(cx, chars); +} + +static inline BigInt* SimpleStringToBigIntHelper( + JSContext* cx, mozilla::Span chars, unsigned radix, + bool* haveParseError) { + if (chars.Length() > 1) { + if (chars[0] == 
'+') { + return BigInt::parseLiteralDigits( + cx, Range{chars.From(1)}, radix, + /* isNegative = */ false, haveParseError); + } + if (chars[0] == '-') { + return BigInt::parseLiteralDigits( + cx, Range{chars.From(1)}, radix, + /* isNegative = */ true, haveParseError); + } + } + + return BigInt::parseLiteralDigits(cx, Range{chars}, radix, + /* isNegative = */ false, haveParseError); +} + +BigInt* JS::SimpleStringToBigInt(JSContext* cx, mozilla::Span chars, + unsigned radix) { + if (chars.empty()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + return nullptr; + } + if (radix < 2 || radix > 36) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_RADIX); + return nullptr; + } + + mozilla::Span latin1{ + reinterpret_cast(chars.data()), chars.size()}; + bool haveParseError = false; + BigInt* bi = SimpleStringToBigIntHelper(cx, latin1, radix, &haveParseError); + if (!bi) { + if (haveParseError) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BIGINT_INVALID_SYNTAX); + } + return nullptr; + } + MOZ_RELEASE_ASSERT(!haveParseError); + return bi; +} + +BigInt* JS::ToBigInt(JSContext* cx, HandleValue val) { + return js::ToBigInt(cx, val); +} + +int64_t JS::ToBigInt64(JS::BigInt* bi) { return BigInt::toInt64(bi); } + +uint64_t JS::ToBigUint64(JS::BigInt* bi) { return BigInt::toUint64(bi); } + +// Semi-public template details + +BigInt* JS::detail::BigIntFromInt64(JSContext* cx, int64_t num) { + return BigInt::createFromInt64(cx, num); +} + +BigInt* JS::detail::BigIntFromUint64(JSContext* cx, uint64_t num) { + return BigInt::createFromUint64(cx, num); +} + +BigInt* JS::detail::BigIntFromBool(JSContext* cx, bool b) { + return b ? 
BigInt::one(cx) : BigInt::zero(cx); +} diff --git a/js/src/vm/BigIntType.h b/js/src/vm/BigIntType.h new file mode 100644 index 0000000000..0075397d77 --- /dev/null +++ b/js/src/vm/BigIntType.h @@ -0,0 +1,476 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BigIntType_h +#define vm_BigIntType_h + +#include "mozilla/Assertions.h" +#include "mozilla/Range.h" +#include "mozilla/Span.h" + +#include "jstypes.h" +#include "gc/Barrier.h" +#include "gc/GC.h" +#include "gc/Nursery.h" +#include "js/AllocPolicy.h" +#include "js/GCHashTable.h" +#include "js/Result.h" +#include "js/RootingAPI.h" +#include "js/TraceKind.h" +#include "js/TypeDecls.h" +#include "vm/StringType.h" +#include "vm/Xdr.h" + +namespace JS { + +class JS_PUBLIC_API BigInt; + +} // namespace JS + +namespace js { + +template +XDRResult XDRBigInt(XDRState* xdr, MutableHandle bi); + +} // namespace js + +namespace JS { + +class BigInt final : public js::gc::CellWithLengthAndFlags { + public: + using Digit = uintptr_t; + + private: + // The low CellFlagBitsReservedForGC flag bits are reserved. + static constexpr uintptr_t SignBit = + js::Bit(js::gc::CellFlagBitsReservedForGC); + + static constexpr size_t InlineDigitsLength = + (js::gc::MinCellSize - sizeof(CellWithLengthAndFlags)) / sizeof(Digit); + + public: + // The number of digits and the flags are stored in the cell header. + size_t digitLength() const { return headerLengthField(); } + + private: + // The digit storage starts with the least significant digit (little-endian + // digit order). Byte order within a digit is of course native endian. 
+ union { + Digit* heapDigits_; + Digit inlineDigits_[InlineDigitsLength]; + }; + + void setLengthAndFlags(uint32_t len, uint32_t flags) { + setHeaderLengthAndFlags(len, flags); + } + + public: + static const JS::TraceKind TraceKind = JS::TraceKind::BigInt; + + void fixupAfterMovingGC() {} + + js::gc::AllocKind getAllocKind() const { return js::gc::AllocKind::BIGINT; } + + // Offset for direct access from JIT code. + static constexpr size_t offsetOfDigitLength() { + return offsetOfHeaderLength(); + } + + bool hasInlineDigits() const { return digitLength() <= InlineDigitsLength; } + bool hasHeapDigits() const { return !hasInlineDigits(); } + + using Digits = mozilla::Span; + Digits digits() { + return Digits(hasInlineDigits() ? inlineDigits_ : heapDigits_, + digitLength()); + } + using ConstDigits = mozilla::Span; + ConstDigits digits() const { + return ConstDigits(hasInlineDigits() ? inlineDigits_ : heapDigits_, + digitLength()); + } + Digit digit(size_t idx) const { return digits()[idx]; } + void setDigit(size_t idx, Digit digit) { digits()[idx] = digit; } + + bool isZero() const { return digitLength() == 0; } + bool isNegative() const { return headerFlagsField() & SignBit; } + + void initializeDigitsToZero(); + + void traceChildren(JSTracer* trc); + + static MOZ_ALWAYS_INLINE void postWriteBarrier(void* cellp, BigInt* prev, + BigInt* next) { + js::gc::PostWriteBarrierImpl(cellp, prev, next); + } + + void finalize(JSFreeOp* fop); + js::HashNumber hash() const; + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const; + size_t sizeOfExcludingThisInNursery(mozilla::MallocSizeOf mallocSizeOf) const; + + static BigInt* createUninitialized( + JSContext* cx, size_t digitLength, bool isNegative, + js::gc::InitialHeap heap = js::gc::DefaultHeap); + static BigInt* createFromDouble(JSContext* cx, double d); + static BigInt* createFromUint64(JSContext* cx, uint64_t n); + static BigInt* createFromInt64(JSContext* cx, int64_t n); + static BigInt* 
createFromDigit(JSContext* cx, Digit d, bool isNegative); + static BigInt* createFromNonZeroRawUint64(JSContext* cx, uint64_t n, + bool isNegative); + // FIXME: Cache these values. + static BigInt* zero(JSContext* cx, + js::gc::InitialHeap heap = js::gc::DefaultHeap); + static BigInt* one(JSContext* cx); + static BigInt* negativeOne(JSContext* cx); + + static BigInt* copy(JSContext* cx, Handle x, + js::gc::InitialHeap heap = js::gc::DefaultHeap); + static BigInt* add(JSContext* cx, Handle x, Handle y); + static BigInt* sub(JSContext* cx, Handle x, Handle y); + static BigInt* mul(JSContext* cx, Handle x, Handle y); + static BigInt* div(JSContext* cx, Handle x, Handle y); + static BigInt* mod(JSContext* cx, Handle x, Handle y); + static BigInt* pow(JSContext* cx, Handle x, Handle y); + static BigInt* neg(JSContext* cx, Handle x); + static BigInt* inc(JSContext* cx, Handle x); + static BigInt* dec(JSContext* cx, Handle x); + static BigInt* lsh(JSContext* cx, Handle x, Handle y); + static BigInt* rsh(JSContext* cx, Handle x, Handle y); + static BigInt* bitAnd(JSContext* cx, Handle x, Handle y); + static BigInt* bitXor(JSContext* cx, Handle x, Handle y); + static BigInt* bitOr(JSContext* cx, Handle x, Handle y); + static BigInt* bitNot(JSContext* cx, Handle x); + + static int64_t toInt64(BigInt* x); + static uint64_t toUint64(BigInt* x); + + // Return true if the BigInt is without loss of precision representable as an + // int64 and store the int64 value in the output. Otherwise return false and + // leave the value of the output parameter unspecified. + static bool isInt64(BigInt* x, int64_t* result); + + static BigInt* asIntN(JSContext* cx, Handle x, uint64_t bits); + static BigInt* asUintN(JSContext* cx, Handle x, uint64_t bits); + + // Type-checking versions of arithmetic operations. These methods + // must be called with at least one BigInt operand. Binary + // operations will throw a TypeError if one of the operands is not a + // BigInt value. 
+ static bool addValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool subValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool mulValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool divValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool modValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool powValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool negValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool incValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool decValue(JSContext* cx, Handle operand, + MutableHandle res); + static bool lshValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool rshValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitAndValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitXorValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitOrValue(JSContext* cx, Handle lhs, Handle rhs, + MutableHandle res); + static bool bitNotValue(JSContext* cx, Handle operand, + MutableHandle res); + + static double numberValue(BigInt* x); + + template + static JSLinearString* toString(JSContext* cx, Handle x, + uint8_t radix); + template + static BigInt* parseLiteral(JSContext* cx, + const mozilla::Range chars, + bool* haveParseError); + template + static BigInt* parseLiteralDigits( + JSContext* cx, const mozilla::Range chars, unsigned radix, + bool isNegative, bool* haveParseError, + js::gc::InitialHeap heap = js::gc::DefaultHeap); + + template + static bool literalIsZero(const mozilla::Range chars); + + // Check a literal for a non-zero character after the radix indicators + // have been removed + template + static bool literalIsZeroNoRadix(const mozilla::Range chars); + + static int8_t compare(BigInt* lhs, BigInt* rhs); + static bool 
equal(BigInt* lhs, BigInt* rhs); + static bool equal(BigInt* lhs, double rhs); + static JS::Result equal(JSContext* cx, Handle lhs, + HandleString rhs); + static JS::Result looselyEqual(JSContext* cx, Handle lhs, + HandleValue rhs); + + static bool lessThan(BigInt* x, BigInt* y); + // These methods return Nothing when the non-BigInt operand is NaN + // or a string that can't be interpreted as a BigInt. + static mozilla::Maybe lessThan(BigInt* lhs, double rhs); + static mozilla::Maybe lessThan(double lhs, BigInt* rhs); + static bool lessThan(JSContext* cx, Handle lhs, HandleString rhs, + mozilla::Maybe& res); + static bool lessThan(JSContext* cx, HandleString lhs, Handle rhs, + mozilla::Maybe& res); + static bool lessThan(JSContext* cx, HandleValue lhs, HandleValue rhs, + mozilla::Maybe& res); + +#if defined(DEBUG) || defined(JS_JITSPEW) + void dump() const; // Debugger-friendly stderr dump. + void dump(js::GenericPrinter& out) const; +#endif + + public: + static constexpr size_t DigitBits = sizeof(Digit) * CHAR_BIT; + + private: + static constexpr size_t HalfDigitBits = DigitBits / 2; + static constexpr Digit HalfDigitMask = (1ull << HalfDigitBits) - 1; + + static_assert(DigitBits == 32 || DigitBits == 64, + "Unexpected BigInt Digit size"); + + // Limit the size of bigint values to 1 million bits, to prevent excessive + // memory usage. This limit may be raised in the future if needed. Note + // however that there are many parts of the implementation that rely on being + // able to count and index bits using a 32-bit signed ints, so until those + // sites are fixed, the practical limit is 0x7fffffff bits. + static constexpr size_t MaxBitLength = 1024 * 1024; + static constexpr size_t MaxDigitLength = MaxBitLength / DigitBits; + + // BigInts can be serialized to strings of radix between 2 and 36. For a + // given bigint, radix 2 will take the most characters (one per bit). 
+ // Ensure that the max bigint size is small enough so that we can fit the + // corresponding character count into a size_t, with space for a possible + // sign prefix. + static_assert(MaxBitLength <= std::numeric_limits::max() - 1, + "BigInt max length must be small enough to be serialized as a " + "binary string"); + + static size_t calculateMaximumCharactersRequired(HandleBigInt x, + unsigned radix); + static MOZ_MUST_USE bool calculateMaximumDigitsRequired(JSContext* cx, + uint8_t radix, + size_t charCount, + size_t* result); + + static bool absoluteDivWithDigitDivisor( + JSContext* cx, Handle x, Digit divisor, + const mozilla::Maybe>& quotient, Digit* remainder, + bool quotientNegative); + static void internalMultiplyAdd(BigInt* source, Digit factor, Digit summand, + unsigned, BigInt* result); + static void multiplyAccumulate(BigInt* multiplicand, Digit multiplier, + BigInt* accumulator, + unsigned accumulatorIndex); + static bool absoluteDivWithBigIntDivisor( + JSContext* cx, Handle dividend, Handle divisor, + const mozilla::Maybe>& quotient, + const mozilla::Maybe>& remainder, + bool quotientNegative); + + enum class LeftShiftMode { SameSizeResult, AlwaysAddOneDigit }; + + static BigInt* absoluteLeftShiftAlwaysCopy(JSContext* cx, Handle x, + unsigned shift, LeftShiftMode); + static bool productGreaterThan(Digit factor1, Digit factor2, Digit high, + Digit low); + static BigInt* lshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y); + static BigInt* rshByAbsolute(JSContext* cx, HandleBigInt x, HandleBigInt y); + static BigInt* rshByMaximum(JSContext* cx, bool isNegative); + static BigInt* truncateAndSubFromPowerOfTwo(JSContext* cx, HandleBigInt x, + uint64_t bits, + bool resultNegative); + + Digit absoluteInplaceAdd(BigInt* summand, unsigned startIndex); + Digit absoluteInplaceSub(BigInt* subtrahend, unsigned startIndex); + void inplaceRightShiftLowZeroBits(unsigned shift); + void inplaceMultiplyAdd(Digit multiplier, Digit part); + + // The result of 
an SymmetricTrim bitwise op has as many digits as the + // smaller operand. A SymmetricFill bitwise op result has as many digits as + // the larger operand, with high digits (if any) copied from the larger + // operand. AsymmetricFill is like SymmetricFill, except the result has as + // many digits as the first operand; this kind is used for the and-not + // operation. + enum class BitwiseOpKind { SymmetricTrim, SymmetricFill, AsymmetricFill }; + + template + static BigInt* absoluteBitwiseOp(JSContext* cx, Handle x, + Handle y, BitwiseOp&& op); + + // Return `|x| & |y|`. + static BigInt* absoluteAnd(JSContext* cx, Handle x, + Handle y); + + // Return `|x| | |y|`. + static BigInt* absoluteOr(JSContext* cx, Handle x, + Handle y); + + // Return `|x| & ~|y|`. + static BigInt* absoluteAndNot(JSContext* cx, Handle x, + Handle y); + + // Return `|x| ^ |y|`. + static BigInt* absoluteXor(JSContext* cx, Handle x, + Handle y); + + // Return `(|x| + 1) * (resultNegative ? -1 : +1)`. + static BigInt* absoluteAddOne(JSContext* cx, Handle x, + bool resultNegative); + + // Return `(|x| - 1) * (resultNegative ? -1 : +1)`, with the precondition that + // |x| != 0. + static BigInt* absoluteSubOne(JSContext* cx, Handle x, + bool resultNegative = false); + + // Return `a + b`, incrementing `*carry` if the addition overflows. + static inline Digit digitAdd(Digit a, Digit b, Digit* carry) { + Digit result = a + b; + *carry += static_cast(result < a); + return result; + } + + // Return `left - right`, incrementing `*borrow` if the addition overflows. + static inline Digit digitSub(Digit left, Digit right, Digit* borrow) { + Digit result = left - right; + *borrow += static_cast(result > left); + return result; + } + + // Compute `a * b`, returning the low half of the result and putting the + // high half in `*high`. 
+ static Digit digitMul(Digit a, Digit b, Digit* high); + + // Divide `(high << DigitBits) + low` by `divisor`, returning the quotient + // and storing the remainder in `*remainder`, with the precondition that + // `high < divisor` so that the result fits in a Digit. + static Digit digitDiv(Digit high, Digit low, Digit divisor, Digit* remainder); + + // Return `(|x| + |y|) * (resultNegative ? -1 : +1)`. + static BigInt* absoluteAdd(JSContext* cx, Handle x, + Handle y, bool resultNegative); + + // Return `(|x| - |y|) * (resultNegative ? -1 : +1)`, with the precondition + // that |x| >= |y|. + static BigInt* absoluteSub(JSContext* cx, Handle x, + Handle y, bool resultNegative); + + // If `|x| < |y|` return -1; if `|x| == |y|` return 0; otherwise return 1. + static int8_t absoluteCompare(BigInt* lhs, BigInt* rhs); + + static int8_t compare(BigInt* lhs, double rhs); + + template + static JSLinearString* toStringBasePowerOfTwo(JSContext* cx, Handle, + unsigned radix); + template + static JSLinearString* toStringSingleDigitBaseTen(JSContext* cx, Digit digit, + bool isNegative); + static JSLinearString* toStringGeneric(JSContext* cx, Handle, + unsigned radix); + + static BigInt* destructivelyTrimHighZeroDigits(JSContext* cx, BigInt* x); + + bool absFitsInUint64() const { return digitLength() <= 64 / DigitBits; } + + uint64_t uint64FromAbsNonZero() const { + MOZ_ASSERT(!isZero()); + + uint64_t val = digit(0); + if (DigitBits == 32 && digitLength() > 1) { + val |= static_cast(digit(1)) << 32; + } + return val; + } + + friend struct ::JSStructuredCloneReader; + friend struct ::JSStructuredCloneWriter; + template + friend js::XDRResult js::XDRBigInt(js::XDRState* xdr, + MutableHandle bi); + + BigInt() = delete; + BigInt(const BigInt& other) = delete; + void operator=(const BigInt& other) = delete; + + public: + static constexpr size_t offsetOfFlags() { return offsetOfHeaderFlags(); } + static constexpr size_t offsetOfLength() { return offsetOfHeaderLength(); } + + static 
constexpr size_t signBitMask() { return SignBit; } + + private: + // To help avoid writing Spectre-unsafe code, we only allow MacroAssembler to + // call the methods below. + friend class js::jit::MacroAssembler; + + static size_t offsetOfInlineDigits() { + return offsetof(BigInt, inlineDigits_); + } + + static size_t offsetOfHeapDigits() { return offsetof(BigInt, heapDigits_); } + + static constexpr size_t inlineDigitsLength() { return InlineDigitsLength; } + + private: + friend class js::TenuringTracer; +}; + +static_assert( + sizeof(BigInt) >= js::gc::MinCellSize, + "sizeof(BigInt) must be greater than the minimum allocation size"); + +static_assert( + sizeof(BigInt) == js::gc::MinCellSize, + "sizeof(BigInt) intended to be the same as the minimum allocation size"); + +} // namespace JS + +namespace js { + +template +extern JSAtom* BigIntToAtom(JSContext* cx, JS::HandleBigInt bi); + +extern JS::BigInt* NumberToBigInt(JSContext* cx, double d); + +// Parse a BigInt from a string, using the method specified for StringToBigInt. +// Used by the BigInt constructor among other places. +extern JS::Result StringToBigInt( + JSContext* cx, JS::Handle str); + +// Parse a BigInt from an already-validated numeric literal. Used by the +// parser. Can only fail in out-of-memory situations. +extern JS::BigInt* ParseBigIntLiteral( + JSContext* cx, const mozilla::Range& chars); + +// Check an already validated numeric literal for a non-zero value. Used by +// the parsers node folder in deferred mode. 
+extern bool BigIntLiteralIsZero(const mozilla::Range& chars); + +extern JS::BigInt* ToBigInt(JSContext* cx, JS::Handle v); +extern JS::Result ToBigInt64(JSContext* cx, JS::Handle v); +extern JS::Result ToBigUint64(JSContext* cx, JS::Handle v); + +} // namespace js + +#endif diff --git a/js/src/vm/BindingKind.h b/js/src/vm/BindingKind.h new file mode 100644 index 0000000000..1664dd1878 --- /dev/null +++ b/js/src/vm/BindingKind.h @@ -0,0 +1,102 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BindingKind_h +#define vm_BindingKind_h + +#include "mozilla/Assertions.h" // MOZ_ASSERT, MOZ_ASSERT_IF +#include "mozilla/Casting.h" // mozilla::AssertedCast + +#include // uint16_t, uint32_t + +#include "vm/BytecodeUtil.h" // LOCALNO_LIMIT, ENVCOORD_SLOT_LIMIT + +namespace js { + +enum class BindingKind : uint8_t { + Import, + FormalParameter, + Var, + Let, + Const, + + // So you think named lambda callee names are consts? Nope! They don't + // throw when being assigned to in sloppy mode. 
+ NamedLambdaCallee +}; + +static inline bool BindingKindIsLexical(BindingKind kind) { + return kind == BindingKind::Let || kind == BindingKind::Const; +} + +class BindingLocation { + public: + enum class Kind { + Global, + Argument, + Frame, + Environment, + Import, + NamedLambdaCallee + }; + + private: + Kind kind_; + uint32_t slot_; + + BindingLocation(Kind kind, uint32_t slot) : kind_(kind), slot_(slot) {} + + public: + static BindingLocation Global() { + return BindingLocation(Kind::Global, UINT32_MAX); + } + + static BindingLocation Argument(uint16_t slot) { + return BindingLocation(Kind::Argument, slot); + } + + static BindingLocation Frame(uint32_t slot) { + MOZ_ASSERT(slot < LOCALNO_LIMIT); + return BindingLocation(Kind::Frame, slot); + } + + static BindingLocation Environment(uint32_t slot) { + MOZ_ASSERT(slot < ENVCOORD_SLOT_LIMIT); + return BindingLocation(Kind::Environment, slot); + } + + static BindingLocation Import() { + return BindingLocation(Kind::Import, UINT32_MAX); + } + + static BindingLocation NamedLambdaCallee() { + return BindingLocation(Kind::NamedLambdaCallee, UINT32_MAX); + } + + bool operator==(const BindingLocation& other) const { + return kind_ == other.kind_ && slot_ == other.slot_; + } + + bool operator!=(const BindingLocation& other) const { + return !operator==(other); + } + + Kind kind() const { return kind_; } + + uint32_t slot() const { + MOZ_ASSERT(kind_ == Kind::Frame || kind_ == Kind::Environment); + return slot_; + } + + uint16_t argumentSlot() const { + MOZ_ASSERT(kind_ == Kind::Argument); + return mozilla::AssertedCast(slot_); + } +}; + +} // namespace js + +#endif // vm_BindingKind_h diff --git a/js/src/vm/BooleanObject-inl.h b/js/src/vm/BooleanObject-inl.h new file mode 100644 index 0000000000..a8d9376403 --- /dev/null +++ b/js/src/vm/BooleanObject-inl.h @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is 
subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BooleanObject_inl_h +#define vm_BooleanObject_inl_h + +#include "vm/BooleanObject.h" + +#include "vm/JSObject-inl.h" + +namespace js { + +inline BooleanObject* BooleanObject::create( + JSContext* cx, bool b, HandleObject proto /* = nullptr */) { + BooleanObject* obj = NewObjectWithClassProto(cx, proto); + if (!obj) { + return nullptr; + } + obj->setPrimitiveValue(b); + return obj; +} + +} // namespace js + +#endif /* vm_BooleanObject_inl_h */ diff --git a/js/src/vm/BooleanObject.h b/js/src/vm/BooleanObject.h new file mode 100644 index 0000000000..4c27e0e199 --- /dev/null +++ b/js/src/vm/BooleanObject.h @@ -0,0 +1,46 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BooleanObject_h +#define vm_BooleanObject_h + +#include "builtin/Boolean.h" + +#include "vm/NativeObject.h" + +namespace js { + +class BooleanObject : public NativeObject { + /* Stores this Boolean object's [[PrimitiveValue]]. */ + static const unsigned PRIMITIVE_VALUE_SLOT = 0; + + static const ClassSpec classSpec_; + + public: + static const unsigned RESERVED_SLOTS = 1; + + static const JSClass class_; + + /* + * Creates a new Boolean object boxing the given primitive bool. + * If proto is nullptr, the [[Prototype]] will default to Boolean.prototype. 
+ */ + static inline BooleanObject* create(JSContext* cx, bool b, + HandleObject proto = nullptr); + + bool unbox() const { return getFixedSlot(PRIMITIVE_VALUE_SLOT).toBoolean(); } + + private: + static JSObject* createPrototype(JSContext* cx, JSProtoKey key); + + inline void setPrimitiveValue(bool b) { + setFixedSlot(PRIMITIVE_VALUE_SLOT, BooleanValue(b)); + } +}; + +} // namespace js + +#endif /* vm_BooleanObject_h */ diff --git a/js/src/vm/BuildId.cpp b/js/src/vm/BuildId.cpp new file mode 100644 index 0000000000..6183a79014 --- /dev/null +++ b/js/src/vm/BuildId.cpp @@ -0,0 +1,27 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* SpiderMonkey buildId-related functionality. */ + +#include "js/BuildId.h" // JS::BuildIdCharVector, JS::BuildIdOp, JS::GetOptimizedEncodingBuildId, JS::SetProcessBuildIdOp + +#include "mozilla/Atomics.h" // mozilla::Atomic + +#include "jstypes.h" // JS_PUBLIC_API + +#include "vm/Runtime.h" // js::GetBuildId +#include "wasm/WasmModule.h" // js::wasm::GetOptimizedEncodingBuildId + +mozilla::Atomic js::GetBuildId; + +JS_PUBLIC_API void JS::SetProcessBuildIdOp(JS::BuildIdOp buildIdOp) { + js::GetBuildId = buildIdOp; +} + +JS_PUBLIC_API bool JS::GetOptimizedEncodingBuildId( + JS::BuildIdCharVector* buildId) { + return js::wasm::GetOptimizedEncodingBuildId(buildId); +} diff --git a/js/src/vm/BuiltinObjectKind.cpp b/js/src/vm/BuiltinObjectKind.cpp new file mode 100644 index 0000000000..c9d17f180e --- /dev/null +++ b/js/src/vm/BuiltinObjectKind.cpp @@ -0,0 +1,175 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 
2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/BuiltinObjectKind.h" + +#include "jspubtd.h" + +#include "vm/GlobalObject.h" +#include "vm/JSContext.h" + +using namespace js; + +static JSProtoKey ToProtoKey(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + return JSProto_Array; + case BuiltinObjectKind::ArrayBuffer: + return JSProto_ArrayBuffer; + case BuiltinObjectKind::Iterator: + return JSProto_Iterator; + case BuiltinObjectKind::Promise: + return JSProto_Promise; + case BuiltinObjectKind::RegExp: + return JSProto_RegExp; + case BuiltinObjectKind::SharedArrayBuffer: + return JSProto_SharedArrayBuffer; + + case BuiltinObjectKind::FunctionPrototype: + return JSProto_Function; + case BuiltinObjectKind::ObjectPrototype: + return JSProto_Object; + case BuiltinObjectKind::RegExpPrototype: + return JSProto_RegExp; + case BuiltinObjectKind::StringPrototype: + return JSProto_String; + + case BuiltinObjectKind::DateTimeFormat: + return JSProto_DateTimeFormat; + case BuiltinObjectKind::NumberFormat: + return JSProto_NumberFormat; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} + +static bool IsPrototype(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + case BuiltinObjectKind::ArrayBuffer: + case BuiltinObjectKind::Iterator: + case BuiltinObjectKind::Promise: + case BuiltinObjectKind::RegExp: + case BuiltinObjectKind::SharedArrayBuffer: + return false; + + case BuiltinObjectKind::FunctionPrototype: + case BuiltinObjectKind::ObjectPrototype: + case BuiltinObjectKind::RegExpPrototype: + case BuiltinObjectKind::StringPrototype: + return true; + + case BuiltinObjectKind::DateTimeFormat: + case BuiltinObjectKind::NumberFormat: + return false; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} + +using BuiltinName = + const 
js::frontend::ParserName* js::frontend::WellKnownParserAtoms::*; + +struct BuiltinObjectMap { + BuiltinName name; + BuiltinObjectKind kind; +}; + +BuiltinObjectKind js::BuiltinConstructorForName( + JSContext* cx, const js::frontend::ParserAtom* name) { + using WellKnownName = js::frontend::WellKnownParserAtoms; + static constexpr BuiltinObjectMap constructors[] = { + {&WellKnownName::Array, BuiltinObjectKind::Array}, + {&WellKnownName::ArrayBuffer, BuiltinObjectKind::ArrayBuffer}, + {&WellKnownName::Iterator, BuiltinObjectKind::Iterator}, + {&WellKnownName::Promise, BuiltinObjectKind::Promise}, + {&WellKnownName::RegExp, BuiltinObjectKind::RegExp}, + {&WellKnownName::SharedArrayBuffer, BuiltinObjectKind::SharedArrayBuffer}, + {&WellKnownName::DateTimeFormat, BuiltinObjectKind::DateTimeFormat}, + {&WellKnownName::NumberFormat, BuiltinObjectKind::NumberFormat}, + }; + + for (auto& builtin : constructors) { + if (name == cx->parserNames().*(builtin.name)) { + return builtin.kind; + } + } + return BuiltinObjectKind::None; +} + +BuiltinObjectKind js::BuiltinPrototypeForName( + JSContext* cx, const js::frontend::ParserAtom* name) { + using WellKnownName = js::frontend::WellKnownParserAtoms; + static constexpr BuiltinObjectMap prototypes[] = { + {&WellKnownName::Function, BuiltinObjectKind::FunctionPrototype}, + {&WellKnownName::Object, BuiltinObjectKind::ObjectPrototype}, + {&WellKnownName::RegExp, BuiltinObjectKind::RegExpPrototype}, + {&WellKnownName::String, BuiltinObjectKind::StringPrototype}, + }; + + for (auto& builtin : prototypes) { + if (name == cx->parserNames().*(builtin.name)) { + return builtin.kind; + } + } + return BuiltinObjectKind::None; +} + +JSObject* js::MaybeGetBuiltinObject(GlobalObject* global, + BuiltinObjectKind kind) { + JSProtoKey key = ToProtoKey(kind); + if (IsPrototype(kind)) { + return global->maybeGetPrototype(key); + } + return global->maybeGetConstructor(key); +} + +JSObject* js::GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind 
kind) { + JSProtoKey key = ToProtoKey(kind); + if (IsPrototype(kind)) { + return GlobalObject::getOrCreatePrototype(cx, key); + } + return GlobalObject::getOrCreateConstructor(cx, key); +} + +const char* js::BuiltinObjectName(BuiltinObjectKind kind) { + switch (kind) { + case BuiltinObjectKind::Array: + return "Array"; + case BuiltinObjectKind::ArrayBuffer: + return "ArrayBuffer"; + case BuiltinObjectKind::Iterator: + return "Iterator"; + case BuiltinObjectKind::Promise: + return "Promise"; + case BuiltinObjectKind::RegExp: + return "RegExp"; + case BuiltinObjectKind::SharedArrayBuffer: + return "SharedArrayBuffer"; + + case BuiltinObjectKind::FunctionPrototype: + return "Function.prototype"; + case BuiltinObjectKind::ObjectPrototype: + return "Object.prototype"; + case BuiltinObjectKind::RegExpPrototype: + return "RegExp.prototype"; + case BuiltinObjectKind::StringPrototype: + return "String.prototype"; + + case BuiltinObjectKind::DateTimeFormat: + return "DateTimeFormat"; + case BuiltinObjectKind::NumberFormat: + return "NumberFormat"; + + case BuiltinObjectKind::None: + break; + } + MOZ_CRASH("Unexpected builtin object kind"); +} diff --git a/js/src/vm/BuiltinObjectKind.h b/js/src/vm/BuiltinObjectKind.h new file mode 100644 index 0000000000..3f559c0806 --- /dev/null +++ b/js/src/vm/BuiltinObjectKind.h @@ -0,0 +1,85 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BuiltinObjectKind_h +#define vm_BuiltinObjectKind_h + +#include + +#include "jstypes.h" + +class JS_PUBLIC_API JSAtom; +struct JS_PUBLIC_API JSContext; +class JS_PUBLIC_API JSObject; + +namespace js { + +namespace frontend { +class ParserAtom; +} // namespace frontend + +class GlobalObject; + +/** + * Built-in objects used by the GetBuiltinConstructor and GetBuiltinPrototype + * self-hosted intrinsics. + */ +enum class BuiltinObjectKind : uint8_t { + // Built-in constructors. + Array, + ArrayBuffer, + Iterator, + Promise, + RegExp, + SharedArrayBuffer, + + // Built-in prototypes. + FunctionPrototype, + ObjectPrototype, + RegExpPrototype, + StringPrototype, + + // Built-in Intl constructors. + DateTimeFormat, + NumberFormat, + + // Invalid placeholder. + None, +}; + +/** + * Return the BuiltinObjectKind for the given constructor name. Return + * BuiltinObjectKind::None if no matching constructor was found. + */ +BuiltinObjectKind BuiltinConstructorForName(JSContext* cx, + const frontend::ParserAtom* name); + +/** + * Return the BuiltinObjectKind for the given prototype name. Return + * BuiltinObjectKind::None if no matching prototype was found. + */ +BuiltinObjectKind BuiltinPrototypeForName(JSContext* cx, + const frontend::ParserAtom* name); + +/** + * Return the built-in object if already created for the given global. Otherwise + * return nullptr. + */ +JSObject* MaybeGetBuiltinObject(GlobalObject* global, BuiltinObjectKind kind); + +/** + * Return the built-in object for the given global. + */ +JSObject* GetOrCreateBuiltinObject(JSContext* cx, BuiltinObjectKind kind); + +/** + * Return the display name for a built-in object. 
+ */ +const char* BuiltinObjectName(BuiltinObjectKind kind); + +} // namespace js + +#endif /* vm_BuiltinObjectKind_h */ diff --git a/js/src/vm/BytecodeFormatFlags.h b/js/src/vm/BytecodeFormatFlags.h new file mode 100644 index 0000000000..c39abe2012 --- /dev/null +++ b/js/src/vm/BytecodeFormatFlags.h @@ -0,0 +1,57 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeFormatFlags_h +#define vm_BytecodeFormatFlags_h + +/* + * [SMDOC] Bytecode Format flags (JOF_*) + */ +enum { + JOF_BYTE = 0, /* single bytecode, no immediates */ + JOF_UINT8 = 1, /* unspecified uint8_t argument */ + JOF_UINT16 = 2, /* unspecified uint16_t argument */ + JOF_UINT24 = 3, /* unspecified uint24_t argument */ + JOF_UINT32 = 4, /* unspecified uint32_t argument */ + JOF_INT8 = 5, /* int8_t literal */ + JOF_INT32 = 6, /* int32_t literal */ + JOF_JUMP = 7, /* int32_t jump offset */ + JOF_TABLESWITCH = 8, /* table switch */ + JOF_ENVCOORD = 9, /* embedded ScopeCoordinate immediate */ + JOF_ARGC = 10, /* uint16_t argument count */ + JOF_QARG = 11, /* function argument index */ + JOF_LOCAL = 12, /* var or block-local variable */ + JOF_RESUMEINDEX = 13, /* yield, await, or gosub resume index */ + JOF_DOUBLE = 14, /* inline DoubleValue */ + JOF_GCTHING = 15, /* uint32_t generic gc-thing index */ + JOF_ATOM = 16, /* uint32_t constant index */ + JOF_OBJECT = 17, /* uint32_t object index */ + JOF_REGEXP = 18, /* uint32_t regexp index */ + JOF_SCOPE = 19, /* uint32_t scope index */ + JOF_BIGINT = 20, /* uint32_t index for BigInt value */ + JOF_ICINDEX = 21, /* uint32_t IC index */ + JOF_LOOPHEAD = 22, /* JSOp::LoopHead, combines JOF_ICINDEX and JOF_UINT8 */ + JOF_CLASS_CTOR = 23, /* uint32_t atom index, 
sourceStart, sourceEnd */ + JOF_TWO_UINT8 = 24, /* A pair of unspecified uint8_t arguments */ + JOF_TYPEMASK = 0xFF, /* mask for above immediate types */ + + JOF_NAME = 1 << 8, /* name operation */ + JOF_PROP = 2 << 8, /* obj.prop operation */ + JOF_ELEM = 3 << 8, /* obj[index] operation */ + JOF_MODEMASK = 0xFF << 8, /* mask for above addressing modes */ + + JOF_PROPSET = 1 << 16, /* property/element/name set operation */ + JOF_PROPINIT = 1 << 17, /* property/element/name init operation */ + JOF_CHECKSLOPPY = 1 << 18, /* op can only be generated in sloppy mode */ + JOF_CHECKSTRICT = 1 << 19, /* op can only be generated in strict mode */ + JOF_INVOKE = 1 << 20, /* any call, construct, or eval instruction */ + JOF_CONSTRUCT = 1 << 21, /* invoke instruction using [[Construct]] entry */ + JOF_SPREAD = 1 << 22, /* invoke instruction using spread argument */ + JOF_GNAME = 1 << 23, /* predicted global name */ + JOF_IC = 1 << 24, /* baseline may use an IC for this op */ +}; + +#endif /* vm_BytecodeFormatFlags_h */ diff --git a/js/src/vm/BytecodeIterator-inl.h b/js/src/vm/BytecodeIterator-inl.h new file mode 100644 index 0000000000..37e42fc88d --- /dev/null +++ b/js/src/vm/BytecodeIterator-inl.h @@ -0,0 +1,40 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeIterator_inl_h +#define vm_BytecodeIterator_inl_h + +#include "vm/BytecodeIterator.h" + +#include "vm/JSScript.h" + +namespace js { + +inline BytecodeIterator::BytecodeIterator(const JSScript* script) + : current_(script, script->code()) {} + +// AllBytecodesIterable + +inline BytecodeIterator AllBytecodesIterable::begin() { + return BytecodeIterator(script_); +} + +inline BytecodeIterator AllBytecodesIterable::end() { + return BytecodeIterator(BytecodeLocation(script_, script_->codeEnd())); +} + +// BytecodeLocationRange + +inline BytecodeIterator BytecodeLocationRange::begin() { + return BytecodeIterator(beginLoc_); +} + +inline BytecodeIterator BytecodeLocationRange::end() { + return BytecodeIterator(endLoc_); +} + +} // namespace js +#endif diff --git a/js/src/vm/BytecodeIterator.h b/js/src/vm/BytecodeIterator.h new file mode 100644 index 0000000000..afc84e0451 --- /dev/null +++ b/js/src/vm/BytecodeIterator.h @@ -0,0 +1,85 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeIterator_h +#define vm_BytecodeIterator_h + +#include "vm/BytecodeLocation.h" + +namespace js { + +class BytecodeIterator { + BytecodeLocation current_; + + public: + inline explicit BytecodeIterator(const JSScript* script); + + explicit BytecodeIterator(BytecodeLocation loc) : current_(loc) {} + + BytecodeIterator& operator=(const BytecodeIterator&) = default; + + bool operator==(const BytecodeIterator& other) const { + return other.current_ == current_; + } + + bool operator!=(const BytecodeIterator& other) const { + return !(other.current_ == current_); + } + + const BytecodeLocation& operator*() const { return current_; } + + const BytecodeLocation* operator->() const { return ¤t_; } + + // Pre-increment + BytecodeIterator& operator++() { + current_ = current_.next(); + return *this; + } + + // Post-increment + BytecodeIterator operator++(int) { + BytecodeIterator previous(*this); + current_ = current_.next(); + return previous; + } +}; + +// Given a JSScript, allow the construction of a range based for-loop +// that will visit all script locations in that script. +class AllBytecodesIterable { + const JSScript* script_; + + public: + explicit AllBytecodesIterable(const JSScript* script) : script_(script) {} + + BytecodeIterator begin(); + BytecodeIterator end(); +}; + +// Construct a range based iterator that will visit all bytecode locations +// between two given bytecode locations. +// `beginLoc_` is the bytecode location where the iterator will start, and +// `endLoc_` is the bytecode location where the iterator will end. 
+class BytecodeLocationRange { + BytecodeLocation beginLoc_; + BytecodeLocation endLoc_; + + public: + explicit BytecodeLocationRange(BytecodeLocation beginLoc, + BytecodeLocation endLoc) + : beginLoc_(beginLoc), endLoc_(endLoc) { +#ifdef DEBUG + MOZ_ASSERT(beginLoc.hasSameScript(endLoc)); +#endif + } + + BytecodeIterator begin(); + BytecodeIterator end(); +}; + +} // namespace js + +#endif diff --git a/js/src/vm/BytecodeLocation-inl.h b/js/src/vm/BytecodeLocation-inl.h new file mode 100644 index 0000000000..f254515a50 --- /dev/null +++ b/js/src/vm/BytecodeLocation-inl.h @@ -0,0 +1,111 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeLocation_inl_h +#define vm_BytecodeLocation_inl_h + +#include "vm/BytecodeLocation.h" + +#include "vm/JSScript.h" + +#include "vm/BytecodeUtil-inl.h" +#include "vm/JSScript-inl.h" + +namespace js { + +inline uint32_t BytecodeLocation::bytecodeToOffset( + const JSScript* script) const { + MOZ_ASSERT(this->isInBounds()); + return script->pcToOffset(this->rawBytecode_); +} + +inline JSAtom* BytecodeLocation::getAtom(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getAtom(this->rawBytecode_); +} + +inline PropertyName* BytecodeLocation::getPropertyName( + const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getName(this->rawBytecode_); +} + +inline JS::BigInt* BytecodeLocation::getBigInt(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::BigInt)); + return script->getBigInt(this->rawBytecode_); +} + +inline JSObject* BytecodeLocation::getObject(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::CallSiteObj) || 
is(JSOp::Object)); + return script->getObject(this->rawBytecode_); +} + +inline JSFunction* BytecodeLocation::getFunction(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::Lambda) || is(JSOp::LambdaArrow) || + is(JSOp::FunWithProto)); + return script->getFunction(this->rawBytecode_); +} + +inline js::RegExpObject* BytecodeLocation::getRegExp( + const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + MOZ_ASSERT(is(JSOp::RegExp)); + return script->getRegExp(this->rawBytecode_); +} + +inline js::Scope* BytecodeLocation::getScope(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->getScope(this->rawBytecode_); +} + +inline Scope* BytecodeLocation::innermostScope(const JSScript* script) const { + MOZ_ASSERT(this->isValid()); + return script->innermostScope(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::tableSwitchCaseOffset( + const JSScript* script, uint32_t caseIndex) const { + return script->tableSwitchCaseOffset(this->rawBytecode_, caseIndex); +} + +inline uint32_t BytecodeLocation::getJumpTargetOffset( + const JSScript* script) const { + MOZ_ASSERT(this->isJump()); + return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::getTableSwitchDefaultOffset( + const JSScript* script) const { + MOZ_ASSERT(this->is(JSOp::TableSwitch)); + return this->bytecodeToOffset(script) + GET_JUMP_OFFSET(this->rawBytecode_); +} + +BytecodeLocation BytecodeLocation::getTableSwitchDefaultTarget() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return BytecodeLocation(*this, rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_)); +} + +BytecodeLocation BytecodeLocation::getTableSwitchCaseTarget( + const JSScript* script, uint32_t caseIndex) const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + jsbytecode* casePC = script->tableSwitchCasePC(rawBytecode_, caseIndex); + return BytecodeLocation(*this, casePC); +} + +inline uint32_t BytecodeLocation::useCount() const 
{ + return GetUseCount(this->rawBytecode_); +} + +inline uint32_t BytecodeLocation::defCount() const { + return GetDefCount(this->rawBytecode_); +} + +} // namespace js + +#endif diff --git a/js/src/vm/BytecodeLocation.cpp b/js/src/vm/BytecodeLocation.cpp new file mode 100644 index 0000000000..fae05a9275 --- /dev/null +++ b/js/src/vm/BytecodeLocation.cpp @@ -0,0 +1,28 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/BytecodeLocation-inl.h" + +#include "vm/JSScript.h" + +using namespace js; + +#ifdef DEBUG +bool BytecodeLocation::isValid(const JSScript* script) const { + // Note: Don't create a new BytecodeLocation during the implementation of + // this, as it is used in the constructor, and will recurse forever. + return script->contains(*this) || toRawBytecode() == script->codeEnd(); +} + +bool BytecodeLocation::isInBounds(const JSScript* script) const { + return script->contains(*this); +} + +const JSScript* BytecodeLocation::getDebugOnlyScript() const { + return this->debugOnlyScript_; +} + +#endif // DEBUG diff --git a/js/src/vm/BytecodeLocation.h b/js/src/vm/BytecodeLocation.h new file mode 100644 index 0000000000..b3932fb3f1 --- /dev/null +++ b/js/src/vm/BytecodeLocation.h @@ -0,0 +1,347 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeLocation_h +#define vm_BytecodeLocation_h + +#include "frontend/NameAnalysisTypes.h" +#include "js/TypeDecls.h" +#include "vm/AsyncFunctionResolveKind.h" +#include "vm/BuiltinObjectKind.h" +#include "vm/BytecodeUtil.h" +#include "vm/CheckIsObjectKind.h" // CheckIsObjectKind +#include "vm/FunctionPrefixKind.h" // FunctionPrefixKind +#include "vm/GeneratorResumeKind.h" +#include "vm/StringType.h" + +namespace js { + +using RawBytecodeLocationOffset = uint32_t; + +class PropertyName; +class RegExpObject; + +class BytecodeLocationOffset { + RawBytecodeLocationOffset rawOffset_; + + public: + explicit BytecodeLocationOffset(RawBytecodeLocationOffset offset) + : rawOffset_(offset) {} + + RawBytecodeLocationOffset rawOffset() const { return rawOffset_; } +}; + +using RawBytecode = jsbytecode*; + +// A immutable representation of a program location +// +class BytecodeLocation { + RawBytecode rawBytecode_; +#ifdef DEBUG + const JSScript* debugOnlyScript_; +#endif + + // Construct a new BytecodeLocation, while borrowing scriptIdentity + // from some other BytecodeLocation. + BytecodeLocation(const BytecodeLocation& loc, RawBytecode pc) + : rawBytecode_(pc) +#ifdef DEBUG + , + debugOnlyScript_(loc.debugOnlyScript_) +#endif + { + MOZ_ASSERT(isValid()); + } + + public: + // Disallow the creation of an uninitialized location. + BytecodeLocation() = delete; + + BytecodeLocation(const JSScript* script, RawBytecode pc) + : rawBytecode_(pc) +#ifdef DEBUG + , + debugOnlyScript_(script) +#endif + { + MOZ_ASSERT(isValid()); + } + + RawBytecode toRawBytecode() const { return rawBytecode_; } + +#ifdef DEBUG + // Return true if this bytecode location is valid for the given script. + // This includes the location 1-past the end of the bytecode. + bool isValid(const JSScript* script) const; + + // Return true if this bytecode location is within the bounds of the + // bytecode for a given script. 
+ bool isInBounds(const JSScript* script) const; + + const JSScript* getDebugOnlyScript() const; +#endif + + inline uint32_t bytecodeToOffset(const JSScript* script) const; + + inline uint32_t tableSwitchCaseOffset(const JSScript* script, + uint32_t caseIndex) const; + + inline uint32_t getJumpTargetOffset(const JSScript* script) const; + + inline uint32_t getTableSwitchDefaultOffset(const JSScript* script) const; + + inline BytecodeLocation getTableSwitchDefaultTarget() const; + inline BytecodeLocation getTableSwitchCaseTarget(const JSScript* script, + uint32_t caseIndex) const; + + inline uint32_t useCount() const; + inline uint32_t defCount() const; + + int32_t jumpOffset() const { return GET_JUMP_OFFSET(rawBytecode_); } + + inline JSAtom* getAtom(const JSScript* script) const; + inline PropertyName* getPropertyName(const JSScript* script) const; + inline JS::BigInt* getBigInt(const JSScript* script) const; + inline JSObject* getObject(const JSScript* script) const; + inline JSFunction* getFunction(const JSScript* script) const; + inline js::RegExpObject* getRegExp(const JSScript* script) const; + inline js::Scope* getScope(const JSScript* script) const; + + uint32_t getSymbolIndex() const { + MOZ_ASSERT(is(JSOp::Symbol)); + return GET_UINT8(rawBytecode_); + } + + inline Scope* innermostScope(const JSScript* script) const; + +#ifdef DEBUG + bool hasSameScript(const BytecodeLocation& other) const { + return debugOnlyScript_ == other.debugOnlyScript_; + } +#endif + + // Overloaded operators + + bool operator==(const BytecodeLocation& other) const { + MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_); + return rawBytecode_ == other.rawBytecode_; + } + + bool operator!=(const BytecodeLocation& other) const { + return !(other == *this); + } + + bool operator<(const BytecodeLocation& other) const { + MOZ_ASSERT(this->debugOnlyScript_ == other.debugOnlyScript_); + return rawBytecode_ < other.rawBytecode_; + } + + // It is traditional to represent the rest of 
the relational operators + // using operator<, so we don't need to assert for these. + bool operator>(const BytecodeLocation& other) const { return other < *this; } + + bool operator<=(const BytecodeLocation& other) const { + return !(other < *this); + } + + bool operator>=(const BytecodeLocation& other) const { + return !(*this < other); + } + + // Return the next bytecode + BytecodeLocation next() const { + return BytecodeLocation(*this, + rawBytecode_ + GetBytecodeLength(rawBytecode_)); + } + + // Add an offset. + BytecodeLocation operator+(const BytecodeLocationOffset& offset) { + return BytecodeLocation(*this, rawBytecode_ + offset.rawOffset()); + } + + // Identity Checks + bool is(JSOp op) const { + MOZ_ASSERT(isInBounds()); + return getOp() == op; + } + + // Accessors: + + uint32_t length() const { return GetBytecodeLength(rawBytecode_); } + + bool isJumpTarget() const { return BytecodeIsJumpTarget(getOp()); } + + bool isJump() const { return IsJumpOpcode(getOp()); } + + bool isBackedge() const { return IsBackedgePC(rawBytecode_); } + + bool isBackedgeForLoophead(BytecodeLocation loopHead) const { + return IsBackedgeForLoopHead(rawBytecode_, loopHead.rawBytecode_); + } + + bool opHasIC() const { return BytecodeOpHasIC(getOp()); } + + bool fallsThrough() const { return BytecodeFallsThrough(getOp()); } + + uint32_t icIndex() const { return GET_ICINDEX(rawBytecode_); } + + uint32_t local() const { return GET_LOCALNO(rawBytecode_); } + + uint16_t arg() const { return GET_ARGNO(rawBytecode_); } + + bool isEqualityOp() const { return IsEqualityOp(getOp()); } + + bool isStrictEqualityOp() const { return IsStrictEqualityOp(getOp()); } + + bool isStrictSetOp() const { return IsStrictSetPC(rawBytecode_); } + + bool isNameOp() const { return IsNameOp(getOp()); } + + bool isSpreadOp() const { return IsSpreadOp(getOp()); } + + bool isInvokeOp() const { return IsInvokeOp(getOp()); } + + bool isGetPropOp() const { return IsGetPropOp(getOp()); } + + bool isSetPropOp() const 
{ return IsSetPropOp(getOp()); } + + AsyncFunctionResolveKind getAsyncFunctionResolveKind() { + return AsyncFunctionResolveKind(GET_UINT8(rawBytecode_)); + } + + bool resultIsPopped() const { + MOZ_ASSERT(StackDefs(rawBytecode_) == 1); + return BytecodeIsPopped(rawBytecode_); + } + + // Accessors: + JSOp getOp() const { return JSOp(*rawBytecode_); } + + BytecodeLocation getJumpTarget() const { + MOZ_ASSERT(isJump()); + return BytecodeLocation(*this, + rawBytecode_ + GET_JUMP_OFFSET(rawBytecode_)); + } + + // Return the 'low' parameter to the tableswitch opcode + int32_t getTableSwitchLow() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return GET_JUMP_OFFSET(rawBytecode_ + JUMP_OFFSET_LEN); + } + + // Return the 'high' parameter to the tableswitch opcode + int32_t getTableSwitchHigh() const { + MOZ_ASSERT(is(JSOp::TableSwitch)); + return GET_JUMP_OFFSET(rawBytecode_ + (2 * JUMP_OFFSET_LEN)); + } + + uint32_t getPopCount() const { + MOZ_ASSERT(is(JSOp::PopN)); + return GET_UINT16(rawBytecode_); + } + + uint32_t getDupAtIndex() const { + MOZ_ASSERT(is(JSOp::DupAt)); + return GET_UINT24(rawBytecode_); + } + + uint8_t getPickDepth() const { + MOZ_ASSERT(is(JSOp::Pick)); + return GET_UINT8(rawBytecode_); + } + uint8_t getUnpickDepth() const { + MOZ_ASSERT(is(JSOp::Unpick)); + return GET_UINT8(rawBytecode_); + } + + uint32_t getEnvCalleeNumHops() const { + MOZ_ASSERT(is(JSOp::EnvCallee)); + return GET_UINT8(rawBytecode_); + } + + EnvironmentCoordinate getEnvironmentCoordinate() const { + MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ENVCOORD); + return EnvironmentCoordinate(rawBytecode_); + } + + uint32_t getCallArgc() const { + MOZ_ASSERT(JOF_OPTYPE(getOp()) == JOF_ARGC); + return GET_ARGC(rawBytecode_); + } + + uint32_t getInitElemArrayIndex() const { + MOZ_ASSERT(is(JSOp::InitElemArray)); + uint32_t index = GET_UINT32(rawBytecode_); + MOZ_ASSERT(index <= INT32_MAX, + "the bytecode emitter must never generate JSOp::InitElemArray " + "with an index exceeding int32_t range"); + 
return index; + } + + FunctionPrefixKind getFunctionPrefixKind() const { + MOZ_ASSERT(is(JSOp::SetFunName)); + return FunctionPrefixKind(GET_UINT8(rawBytecode_)); + } + + CheckIsObjectKind getCheckIsObjectKind() const { + MOZ_ASSERT(is(JSOp::CheckIsObj)); + return CheckIsObjectKind(GET_UINT8(rawBytecode_)); + } + + BuiltinObjectKind getBuiltinObjectKind() const { + MOZ_ASSERT(is(JSOp::BuiltinObject)); + return BuiltinObjectKind(GET_UINT8(rawBytecode_)); + } + + uint32_t getNewArrayLength() const { + MOZ_ASSERT(is(JSOp::NewArray)); + return GET_UINT32(rawBytecode_); + } + + int8_t getInt8() const { + MOZ_ASSERT(is(JSOp::Int8)); + return GET_INT8(rawBytecode_); + } + uint16_t getUint16() const { + MOZ_ASSERT(is(JSOp::Uint16)); + return GET_UINT16(rawBytecode_); + } + uint32_t getUint24() const { + MOZ_ASSERT(is(JSOp::Uint24)); + return GET_UINT24(rawBytecode_); + } + int32_t getInt32() const { + MOZ_ASSERT(is(JSOp::Int32)); + return GET_INT32(rawBytecode_); + } + uint32_t getResumeIndex() const { + MOZ_ASSERT(is(JSOp::ResumeIndex) || is(JSOp::InitialYield) || + is(JSOp::Yield) || is(JSOp::Await)); + return GET_RESUMEINDEX(rawBytecode_); + } + Value getInlineValue() const { + MOZ_ASSERT(is(JSOp::Double)); + return GET_INLINE_VALUE(rawBytecode_); + } + + GeneratorResumeKind resumeKind() { return ResumeKindFromPC(rawBytecode_); } + + ThrowMsgKind throwMsgKind() { + MOZ_ASSERT(is(JSOp::ThrowMsg)); + return static_cast(GET_UINT8(rawBytecode_)); + } + +#ifdef DEBUG + // To ease writing assertions + bool isValid() const { return isValid(debugOnlyScript_); } + + bool isInBounds() const { return isInBounds(debugOnlyScript_); } +#endif +}; + +} // namespace js + +#endif diff --git a/js/src/vm/BytecodeUtil-inl.h b/js/src/vm/BytecodeUtil-inl.h new file mode 100644 index 0000000000..776bfa1b91 --- /dev/null +++ b/js/src/vm/BytecodeUtil-inl.h @@ -0,0 +1,240 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * 
This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_BytecodeUtil_inl_h +#define vm_BytecodeUtil_inl_h + +#include "vm/BytecodeUtil.h" + +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "vm/JSScript.h" + +namespace js { + +static inline unsigned GetDefCount(jsbytecode* pc) { + /* + * Add an extra pushed value for Or/And opcodes, so that they are included + * in the pushed array of stack values for type inference. + */ + switch (JSOp(*pc)) { + case JSOp::Or: + case JSOp::And: + case JSOp::Coalesce: + return 1; + case JSOp::Pick: + case JSOp::Unpick: + /* + * Pick pops and pushes how deep it looks in the stack + 1 + * items. i.e. if the stack were |a b[2] c[1] d[0]|, pick 2 + * would pop b, c, and d to rearrange the stack to |a c[0] + * d[1] b[2]|. + */ + return pc[1] + 1; + default: + return StackDefs(pc); + } +} + +static inline unsigned GetUseCount(jsbytecode* pc) { + if (JSOp(*pc) == JSOp::Pick || JSOp(*pc) == JSOp::Unpick) { + return pc[1] + 1; + } + + return StackUses(pc); +} + +static inline JSOp ReverseCompareOp(JSOp op) { + switch (op) { + case JSOp::Gt: + return JSOp::Lt; + case JSOp::Ge: + return JSOp::Le; + case JSOp::Lt: + return JSOp::Gt; + case JSOp::Le: + return JSOp::Ge; + case JSOp::Eq: + case JSOp::Ne: + case JSOp::StrictEq: + case JSOp::StrictNe: + return op; + default: + MOZ_CRASH("unrecognized op"); + } +} + +static inline JSOp NegateCompareOp(JSOp op) { + switch (op) { + case JSOp::Gt: + return JSOp::Le; + case JSOp::Ge: + return JSOp::Lt; + case JSOp::Lt: + return JSOp::Ge; + case JSOp::Le: + return JSOp::Gt; + case JSOp::Eq: + return JSOp::Ne; + case JSOp::Ne: + return JSOp::Eq; + case JSOp::StrictNe: + return JSOp::StrictEq; + case JSOp::StrictEq: + return JSOp::StrictNe; + default: + MOZ_CRASH("unrecognized op"); + } +} + +class BytecodeRange 
{ + public: + BytecodeRange(JSContext* cx, JSScript* script) + : script(cx, script), pc(script->code()), end(pc + script->length()) {} + bool empty() const { return pc == end; } + jsbytecode* frontPC() const { return pc; } + JSOp frontOpcode() const { return JSOp(*pc); } + size_t frontOffset() const { return script->pcToOffset(pc); } + void popFront() { pc += GetBytecodeLength(pc); } + + private: + RootedScript script; + jsbytecode* pc; + jsbytecode* end; +}; + +class BytecodeRangeWithPosition : private BytecodeRange { + public: + using BytecodeRange::empty; + using BytecodeRange::frontOffset; + using BytecodeRange::frontOpcode; + using BytecodeRange::frontPC; + + BytecodeRangeWithPosition(JSContext* cx, JSScript* script) + : BytecodeRange(cx, script), + initialLine(script->lineno()), + lineno(script->lineno()), + column(script->column()), + sn(script->notes()), + snpc(script->code()), + isEntryPoint(false), + isBreakpoint(false), + seenStepSeparator(false), + wasArtifactEntryPoint(false) { + if (!sn->isTerminator()) { + snpc += sn->delta(); + } + updatePosition(); + while (frontPC() != script->main()) { + popFront(); + } + + if (frontOpcode() != JSOp::JumpTarget) { + isEntryPoint = true; + } else { + wasArtifactEntryPoint = true; + } + } + + void popFront() { + BytecodeRange::popFront(); + if (empty()) { + isEntryPoint = false; + } else { + updatePosition(); + } + + // The following conditions are handling artifacts introduced by the + // bytecode emitter, such that we do not add breakpoints on empty + // statements of the source code of the user. 
+ if (wasArtifactEntryPoint) { + wasArtifactEntryPoint = false; + isEntryPoint = true; + } + + if (isEntryPoint && frontOpcode() == JSOp::JumpTarget) { + wasArtifactEntryPoint = isEntryPoint; + isEntryPoint = false; + } + } + + size_t frontLineNumber() const { return lineno; } + size_t frontColumnNumber() const { return column; } + + // Entry points are restricted to bytecode offsets that have an + // explicit mention in the line table. This restriction avoids a + // number of failing cases caused by some instructions not having + // sensible (to the user) line numbers, and it is one way to + // implement the idea that the bytecode emitter should tell the + // debugger exactly which offsets represent "interesting" (to the + // user) places to stop. + bool frontIsEntryPoint() const { return isEntryPoint; } + + // Breakable points are explicitly marked by the emitter as locations where + // the debugger may want to allow users to pause. + bool frontIsBreakablePoint() const { return isBreakpoint; } + + // Breakable step points are the first breakable point after a + // SrcNote::StepSep note has been encountered. + bool frontIsBreakableStepPoint() const { + return isBreakpoint && seenStepSeparator; + } + + private: + void updatePosition() { + if (isBreakpoint) { + isBreakpoint = false; + seenStepSeparator = false; + } + + // Determine the current line number by reading all source notes up to + // and including the current offset. 
+ jsbytecode* lastLinePC = nullptr; + SrcNoteIterator iter(sn); + for (; !iter.atEnd() && snpc <= frontPC(); + ++iter, snpc += (*iter)->delta()) { + auto sn = *iter; + + SrcNoteType type = sn->type(); + if (type == SrcNoteType::ColSpan) { + ptrdiff_t colspan = SrcNote::ColSpan::getSpan(sn); + MOZ_ASSERT(ptrdiff_t(column) + colspan >= 0); + column += colspan; + lastLinePC = snpc; + } else if (type == SrcNoteType::SetLine) { + lineno = SrcNote::SetLine::getLine(sn, initialLine); + column = 0; + lastLinePC = snpc; + } else if (type == SrcNoteType::NewLine) { + lineno++; + column = 0; + lastLinePC = snpc; + } else if (type == SrcNoteType::Breakpoint) { + isBreakpoint = true; + lastLinePC = snpc; + } else if (type == SrcNoteType::StepSep) { + seenStepSeparator = true; + lastLinePC = snpc; + } + } + + sn = *iter; + isEntryPoint = lastLinePC == frontPC(); + } + + size_t initialLine; + size_t lineno; + size_t column; + const SrcNote* sn; + jsbytecode* snpc; + bool isEntryPoint; + bool isBreakpoint; + bool seenStepSeparator; + bool wasArtifactEntryPoint; +}; + +} // namespace js + +#endif /* vm_BytecodeUtil_inl_h */ diff --git a/js/src/vm/BytecodeUtil.cpp b/js/src/vm/BytecodeUtil.cpp new file mode 100644 index 0000000000..cb753a00ec --- /dev/null +++ b/js/src/vm/BytecodeUtil.cpp @@ -0,0 +1,3036 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +/* + * JS bytecode descriptors, disassemblers, and (expression) decompilers. 
+ */ + +#include "vm/BytecodeUtil-inl.h" + +#define __STDC_FORMAT_MACROS + +#include "mozilla/Attributes.h" +#include "mozilla/Maybe.h" +#include "mozilla/ReverseIterator.h" +#include "mozilla/Sprintf.h" +#include "mozilla/Vector.h" + +#include +#include +#include +#include +#include + +#include "jsapi.h" +#include "jsnum.h" +#include "jstypes.h" + +#include "frontend/BytecodeCompiler.h" +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "gc/PublicIterators.h" +#include "jit/IonScript.h" // IonBlockCounts +#include "js/CharacterEncoding.h" +#include "js/experimental/CodeCoverage.h" +#include "js/friend/DumpFunctions.h" // js::DumpPC, js::DumpScript +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "js/Printf.h" +#include "js/Symbol.h" +#include "util/DifferentialTesting.h" +#include "util/Memory.h" +#include "util/StringBuffer.h" +#include "util/Text.h" +#include "vm/BuiltinObjectKind.h" +#include "vm/BytecodeIterator.h" // for AllBytecodesIterable +#include "vm/BytecodeLocation.h" +#include "vm/CodeCoverage.h" +#include "vm/EnvironmentObject.h" +#include "vm/FrameIter.h" // js::{,Script}FrameIter +#include "vm/JSAtom.h" +#include "vm/JSContext.h" +#include "vm/JSFunction.h" +#include "vm/JSObject.h" +#include "vm/JSScript.h" +#include "vm/Opcodes.h" +#include "vm/Printer.h" +#include "vm/Realm.h" +#include "vm/Shape.h" +#include "vm/ToSource.h" // js::ValueToSource + +#include "gc/GC-inl.h" +#include "vm/BytecodeIterator-inl.h" +#include "vm/BytecodeLocation-inl.h" +#include "vm/JSContext-inl.h" +#include "vm/JSObject-inl.h" +#include "vm/JSScript-inl.h" +#include "vm/Realm-inl.h" + +using namespace js; + +using js::frontend::IsIdentifier; + +/* + * Index limit must stay within 32 bits. 
+ */ +static_assert(sizeof(uint32_t) * CHAR_BIT >= INDEX_LIMIT_LOG2 + 1); + +const JSCodeSpec js::CodeSpecTable[] = { +#define MAKE_CODESPEC(op, op_snake, token, length, nuses, ndefs, format) \ + {length, nuses, ndefs, format}, + FOR_EACH_OPCODE(MAKE_CODESPEC) +#undef MAKE_CODESPEC +}; + +/* + * Each element of the array is either a source literal associated with JS + * bytecode or null. + */ +static const char* const CodeToken[] = { +#define TOKEN(op, op_snake, token, ...) token, + FOR_EACH_OPCODE(TOKEN) +#undef TOKEN +}; + +/* + * Array of JS bytecode names used by PC count JSON, DEBUG-only Disassemble + * and JIT debug spew. + */ +const char* const js::CodeNameTable[] = { +#define OPNAME(op, ...) #op, + FOR_EACH_OPCODE(OPNAME) +#undef OPNAME +}; + +/************************************************************************/ + +static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex, + UniqueChars* res); + +/* static */ const char PCCounts::numExecName[] = "interp"; + +static MOZ_MUST_USE bool DumpIonScriptCounts(Sprinter* sp, HandleScript script, + jit::IonScriptCounts* ionCounts) { + if (!sp->jsprintf("IonScript [%zu blocks]:\n", ionCounts->numBlocks())) { + return false; + } + + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + const jit::IonBlockCounts& block = ionCounts->block(i); + unsigned lineNumber = 0, columnNumber = 0; + lineNumber = PCToLineNumber(script, script->offsetToPC(block.offset()), + &columnNumber); + if (!sp->jsprintf("BB #%" PRIu32 " [%05u,%u,%u]", block.id(), + block.offset(), lineNumber, columnNumber)) { + return false; + } + if (block.description()) { + if (!sp->jsprintf(" [inlined %s]", block.description())) { + return false; + } + } + for (size_t j = 0; j < block.numSuccessors(); j++) { + if (!sp->jsprintf(" -> #%" PRIu32, block.successor(j))) { + return false; + } + } + if (!sp->jsprintf(" :: %" PRIu64 " hits\n", block.hitCount())) { + return false; + } + if (!sp->jsprintf("%s\n", block.code())) { + return false; + } 
+ } + + return true; +} + +static MOZ_MUST_USE bool DumpPCCounts(JSContext* cx, HandleScript script, + Sprinter* sp) { + MOZ_ASSERT(script->hasScriptCounts()); + + // Ensure the Disassemble1 call below does not discard the script counts. + gc::AutoSuppressGC suppress(cx); + +#ifdef DEBUG + jsbytecode* pc = script->code(); + while (pc < script->codeEnd()) { + jsbytecode* next = GetNextPc(pc); + + if (!Disassemble1(cx, script, pc, script->pcToOffset(pc), true, sp)) { + return false; + } + + if (!sp->put(" {")) { + return false; + } + + PCCounts* counts = script->maybeGetPCCounts(pc); + if (double val = counts ? counts->numExec() : 0.0) { + if (!sp->jsprintf("\"%s\": %.0f", PCCounts::numExecName, val)) { + return false; + } + } + if (!sp->put("}\n")) { + return false; + } + + pc = next; + } +#endif + + jit::IonScriptCounts* ionCounts = script->getIonCounts(); + while (ionCounts) { + if (!DumpIonScriptCounts(sp, script, ionCounts)) { + return false; + } + + ionCounts = ionCounts->previous(); + } + + return true; +} + +bool js::DumpRealmPCCounts(JSContext* cx) { + Rooted> scripts(cx, GCVector(cx)); + for (auto base = cx->zone()->cellIter(); !base.done(); + base.next()) { + if (base->realm() != cx->realm()) { + continue; + } + MOZ_ASSERT_IF(base->hasScriptCounts(), base->hasBytecode()); + if (base->hasScriptCounts()) { + if (!scripts.append(base->asJSScript())) { + return false; + } + } + } + + for (uint32_t i = 0; i < scripts.length(); i++) { + HandleScript script = scripts[i]; + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + + fprintf(stdout, "--- SCRIPT %s:%u ---\n", script->filename(), + script->lineno()); + if (!DumpPCCounts(cx, script, &sprinter)) { + return false; + } + fputs(sprinter.string(), stdout); + fprintf(stdout, "--- END SCRIPT %s:%u ---\n", script->filename(), + script->lineno()); + } + + return true; +} + +///////////////////////////////////////////////////////////////////// +// Bytecode Parser 
+///////////////////////////////////////////////////////////////////// + +// Stores the information about the stack slot, where the value comes from. +// Elements of BytecodeParser::Bytecode.{offsetStack,offsetStackAfter} arrays. +class OffsetAndDefIndex { + // The offset of the PC that pushed the value for this slot. + uint32_t offset_; + + // The index in `ndefs` for the PC (0-origin) + uint8_t defIndex_; + + enum : uint8_t { + Normal = 0, + + // Ignored this value in the expression decompilation. + // Used by JSOp::NopDestructuring. See BytecodeParser::simulateOp. + Ignored, + + // The value in this slot comes from 2 or more paths. + // offset_ and defIndex_ holds the information for the path that + // reaches here first. + Merged, + } type_; + + public: + uint32_t offset() const { + MOZ_ASSERT(!isSpecial()); + return offset_; + }; + uint32_t specialOffset() const { + MOZ_ASSERT(isSpecial()); + return offset_; + }; + + uint8_t defIndex() const { + MOZ_ASSERT(!isSpecial()); + return defIndex_; + } + uint8_t specialDefIndex() const { + MOZ_ASSERT(isSpecial()); + return defIndex_; + } + + bool isSpecial() const { return type_ != Normal; } + bool isMerged() const { return type_ == Merged; } + bool isIgnored() const { return type_ == Ignored; } + + void set(uint32_t aOffset, uint8_t aDefIndex) { + offset_ = aOffset; + defIndex_ = aDefIndex; + type_ = Normal; + } + + // Keep offset_ and defIndex_ values for stack dump. 
+ void setMerged() { type_ = Merged; } + void setIgnored() { type_ = Ignored; } + + bool operator==(const OffsetAndDefIndex& rhs) const { + return offset_ == rhs.offset_ && defIndex_ == rhs.defIndex_; + } + + bool operator!=(const OffsetAndDefIndex& rhs) const { + return !(*this == rhs); + } +}; + +namespace { + +class BytecodeParser { + public: + enum class JumpKind { + Simple, + SwitchCase, + SwitchDefault, + TryCatch, + TryFinally + }; + + private: + class Bytecode { + public: + explicit Bytecode(const LifoAllocPolicy& alloc) + : parsed(false), + stackDepth(0), + offsetStack(nullptr) +#if defined(DEBUG) || defined(JS_JITSPEW) + , + stackDepthAfter(0), + offsetStackAfter(nullptr), + jumpOrigins(alloc) +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + { + } + + // Whether this instruction has been analyzed to get its output defines + // and stack. + bool parsed; + + // Stack depth before this opcode. + uint32_t stackDepth; + + // Pointer to array of |stackDepth| offsets. An element at position N + // in the array is the offset of the opcode that defined the + // corresponding stack slot. The top of the stack is at position + // |stackDepth - 1|. + OffsetAndDefIndex* offsetStack; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // stack depth after this opcode. + uint32_t stackDepthAfter; + + // Pointer to array of |stackDepthAfter| offsets. + OffsetAndDefIndex* offsetStackAfter; + + struct JumpInfo { + uint32_t from; + JumpKind kind; + + JumpInfo(uint32_t from_, JumpKind kind_) : from(from_), kind(kind_) {} + }; + + // A list of offsets of the bytecode that jumps to this bytecode, + excluding previous bytecode.
+ Vector> jumpOrigins; +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + bool captureOffsetStack(LifoAlloc& alloc, const OffsetAndDefIndex* stack, + uint32_t depth) { + stackDepth = depth; + if (stackDepth) { + offsetStack = alloc.newArray(stackDepth); + if (!offsetStack) { + return false; + } + for (uint32_t n = 0; n < stackDepth; n++) { + offsetStack[n] = stack[n]; + } + } + return true; + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + bool captureOffsetStackAfter(LifoAlloc& alloc, + const OffsetAndDefIndex* stack, + uint32_t depth) { + stackDepthAfter = depth; + if (stackDepthAfter) { + offsetStackAfter = alloc.newArray(stackDepthAfter); + if (!offsetStackAfter) { + return false; + } + for (uint32_t n = 0; n < stackDepthAfter; n++) { + offsetStackAfter[n] = stack[n]; + } + } + return true; + } + + bool addJump(uint32_t from, JumpKind kind) { + return jumpOrigins.append(JumpInfo(from, kind)); + } +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + // When control-flow merges, intersect the stacks, marking slots that + // are defined by different offsets and/or defIndices merged. + // This is sufficient for forward control-flow. It doesn't grok loops + // -- for that you would have to iterate to a fixed point -- but there + // shouldn't be operands on the stack at a loop back-edge anyway. + void mergeOffsetStack(const OffsetAndDefIndex* stack, uint32_t depth) { + MOZ_ASSERT(depth == stackDepth); + for (uint32_t n = 0; n < stackDepth; n++) { + if (stack[n].isIgnored()) { + continue; + } + if (offsetStack[n].isIgnored()) { + offsetStack[n] = stack[n]; + } + if (offsetStack[n] != stack[n]) { + offsetStack[n].setMerged(); + } + } + } + }; + + JSContext* cx_; + LifoAlloc& alloc_; + RootedScript script_; + + Bytecode** codeArray_; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // Dedicated mode for stack dump. + // Capture stack after each opcode, and also enable special handling for + // some opcodes to make stack transition clearer. 
+ bool isStackDump; +#endif + + public: + BytecodeParser(JSContext* cx, LifoAlloc& alloc, JSScript* script) + : cx_(cx), + alloc_(alloc), + script_(cx, script), + codeArray_(nullptr) +#ifdef DEBUG + , + isStackDump(false) +#endif + { + } + + bool parse(); + +#if defined(DEBUG) || defined(JS_JITSPEW) + bool isReachable(const jsbytecode* pc) const { return maybeCode(pc); } +#endif + + uint32_t stackDepthAtPC(uint32_t offset) const { + // Sometimes the code generator in debug mode asks about the stack depth + // of unreachable code (bug 932180 comment 22). Assume that unreachable + // code has no operands on the stack. + return getCode(offset).stackDepth; + } + uint32_t stackDepthAtPC(const jsbytecode* pc) const { + return stackDepthAtPC(script_->pcToOffset(pc)); + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + uint32_t stackDepthAfterPC(uint32_t offset) const { + return getCode(offset).stackDepthAfter; + } + uint32_t stackDepthAfterPC(const jsbytecode* pc) const { + return stackDepthAfterPC(script_->pcToOffset(pc)); + } +#endif + + const OffsetAndDefIndex& offsetForStackOperand(uint32_t offset, + int operand) const { + Bytecode& code = getCode(offset); + if (operand < 0) { + operand += code.stackDepth; + MOZ_ASSERT(operand >= 0); + } + MOZ_ASSERT(uint32_t(operand) < code.stackDepth); + return code.offsetStack[operand]; + } + jsbytecode* pcForStackOperand(jsbytecode* pc, int operand, + uint8_t* defIndex) const { + size_t offset = script_->pcToOffset(pc); + const OffsetAndDefIndex& offsetAndDefIndex = + offsetForStackOperand(offset, operand); + if (offsetAndDefIndex.isSpecial()) { + return nullptr; + } + *defIndex = offsetAndDefIndex.defIndex(); + return script_->offsetToPC(offsetAndDefIndex.offset()); + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + const OffsetAndDefIndex& offsetForStackOperandAfterPC(uint32_t offset, + int operand) const { + Bytecode& code = getCode(offset); + if (operand < 0) { + operand += code.stackDepthAfter; + MOZ_ASSERT(operand >= 0); + } 
+ MOZ_ASSERT(uint32_t(operand) < code.stackDepthAfter); + return code.offsetStackAfter[operand]; + } + + template + bool forEachJumpOrigins(jsbytecode* pc, Callback callback) const { + Bytecode& code = getCode(script_->pcToOffset(pc)); + + for (Bytecode::JumpInfo& info : code.jumpOrigins) { + if (!callback(script_->offsetToPC(info.from), info.kind)) { + return false; + } + } + + return true; + } + + void setStackDump() { isStackDump = true; } +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + + private: + LifoAlloc& alloc() { return alloc_; } + + void reportOOM() { ReportOutOfMemory(cx_); } + + uint32_t maximumStackDepth() const { + return script_->nslots() - script_->nfixed(); + } + + Bytecode& getCode(uint32_t offset) const { + MOZ_ASSERT(offset < script_->length()); + MOZ_ASSERT(codeArray_[offset]); + return *codeArray_[offset]; + } + + Bytecode* maybeCode(uint32_t offset) const { + MOZ_ASSERT(offset < script_->length()); + return codeArray_[offset]; + } + +#if defined(DEBUG) || defined(JS_JITSPEW) + Bytecode* maybeCode(const jsbytecode* pc) const { + return maybeCode(script_->pcToOffset(pc)); + } +#endif + + uint32_t simulateOp(JSOp op, uint32_t offset, OffsetAndDefIndex* offsetStack, + uint32_t stackDepth); + + inline bool recordBytecode(uint32_t offset, + const OffsetAndDefIndex* offsetStack, + uint32_t stackDepth); + + inline bool addJump(uint32_t offset, uint32_t stackDepth, + const OffsetAndDefIndex* offsetStack, jsbytecode* pc, + JumpKind kind); +}; + +} // anonymous namespace + +uint32_t BytecodeParser::simulateOp(JSOp op, uint32_t offset, + OffsetAndDefIndex* offsetStack, + uint32_t stackDepth) { + jsbytecode* pc = script_->offsetToPC(offset); + uint32_t nuses = GetUseCount(pc); + uint32_t ndefs = GetDefCount(pc); + + MOZ_ASSERT(stackDepth >= nuses); + stackDepth -= nuses; + MOZ_ASSERT(stackDepth + ndefs <= maximumStackDepth()); + +#ifdef DEBUG + if (isStackDump) { + // Opcodes that modifies the object but keeps it on the stack while + // 
initialization should be listed here instead of switch below. + // For error message, they shouldn't be shown as the original object + // after adding properties. + // For stack dump, keeping the input is better. + switch (op) { + case JSOp::InitHiddenProp: + case JSOp::InitHiddenPropGetter: + case JSOp::InitHiddenPropSetter: + case JSOp::InitLockedProp: + case JSOp::InitProp: + case JSOp::InitPropGetter: + case JSOp::InitPropSetter: + case JSOp::SetFunName: + // Keep the second value. + MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + goto end; + + case JSOp::InitElem: + case JSOp::InitElemGetter: + case JSOp::InitElemSetter: + case JSOp::InitHiddenElem: + case JSOp::InitHiddenElemGetter: + case JSOp::InitHiddenElemSetter: + case JSOp::InitLockedElem: + // Keep the third value. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + goto end; + + default: + break; + } + } +#endif /* DEBUG */ + + // Mark the current offset as defining its values on the offset stack, + // unless it just reshuffles the stack. In that case we want to preserve + // the opcode that generated the original value. + switch (op) { + default: + for (uint32_t n = 0; n != ndefs; ++n) { + offsetStack[stackDepth + n].set(offset, n); + } + break; + + case JSOp::NopDestructuring: + // Poison the last offset to not obfuscate the error message. + offsetStack[stackDepth - 1].setIgnored(); + break; + + case JSOp::Case: + // Keep the switch value. 
+ MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::Dup: + MOZ_ASSERT(ndefs == 2); + offsetStack[stackDepth + 1] = offsetStack[stackDepth]; + break; + + case JSOp::Dup2: + MOZ_ASSERT(ndefs == 4); + offsetStack[stackDepth + 2] = offsetStack[stackDepth]; + offsetStack[stackDepth + 3] = offsetStack[stackDepth + 1]; + break; + + case JSOp::DupAt: { + MOZ_ASSERT(ndefs == 1); + unsigned n = GET_UINT24(pc); + MOZ_ASSERT(n < stackDepth); + offsetStack[stackDepth] = offsetStack[stackDepth - 1 - n]; + break; + } + + case JSOp::Swap: { + MOZ_ASSERT(ndefs == 2); + OffsetAndDefIndex tmp = offsetStack[stackDepth + 1]; + offsetStack[stackDepth + 1] = offsetStack[stackDepth]; + offsetStack[stackDepth] = tmp; + break; + } + + case JSOp::Pick: { + unsigned n = GET_UINT8(pc); + MOZ_ASSERT(ndefs == n + 1); + uint32_t top = stackDepth + n; + OffsetAndDefIndex tmp = offsetStack[stackDepth]; + for (uint32_t i = stackDepth; i < top; i++) { + offsetStack[i] = offsetStack[i + 1]; + } + offsetStack[top] = tmp; + break; + } + + case JSOp::Unpick: { + unsigned n = GET_UINT8(pc); + MOZ_ASSERT(ndefs == n + 1); + uint32_t top = stackDepth + n; + OffsetAndDefIndex tmp = offsetStack[top]; + for (uint32_t i = top; i > stackDepth; i--) { + offsetStack[i] = offsetStack[i - 1]; + } + offsetStack[stackDepth] = tmp; + break; + } + + case JSOp::And: + case JSOp::CheckIsObj: + case JSOp::CheckObjCoercible: + case JSOp::CheckThis: + case JSOp::CheckThisReinit: + case JSOp::CheckClassHeritage: + case JSOp::DebugCheckSelfHosted: + case JSOp::InitGLexical: + case JSOp::InitLexical: + case JSOp::Or: + case JSOp::Coalesce: + case JSOp::SetAliasedVar: + case JSOp::SetArg: + case JSOp::SetIntrinsic: + case JSOp::SetLocal: + case JSOp::InitAliasedLexical: + case JSOp::CheckLexical: + case JSOp::CheckAliasedLexical: + // Keep the top value. + MOZ_ASSERT(nuses == 1); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::InitHomeObject: + // Pop the top value, keep the other value. 
+ MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::CheckResumeKind: + // Pop the top two values, keep the other value. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + break; + + case JSOp::SetGName: + case JSOp::SetName: + case JSOp::SetProp: + case JSOp::StrictSetGName: + case JSOp::StrictSetName: + case JSOp::StrictSetProp: + // Keep the top value, removing other 1 value. + MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 1]; + break; + + case JSOp::SetPropSuper: + case JSOp::StrictSetPropSuper: + // Keep the top value, removing other 2 values. + MOZ_ASSERT(nuses == 3); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 2]; + break; + + case JSOp::SetElemSuper: + case JSOp::StrictSetElemSuper: + // Keep the top value, removing other 3 values. + MOZ_ASSERT(nuses == 4); + MOZ_ASSERT(ndefs == 1); + offsetStack[stackDepth] = offsetStack[stackDepth + 3]; + break; + + case JSOp::IsGenClosing: + case JSOp::IsNoIter: + case JSOp::MoreIter: + case JSOp::OptimizeSpreadCall: + // Keep the top value and push one more value. + MOZ_ASSERT(nuses == 1); + MOZ_ASSERT(ndefs == 2); + offsetStack[stackDepth + 1].set(offset, 1); + break; + + case JSOp::CheckPrivateField: + // Keep the top two values, and push one new value. 
+ MOZ_ASSERT(nuses == 2); + MOZ_ASSERT(ndefs == 3); + offsetStack[stackDepth + 2].set(offset, 2); + break; + } + +#ifdef DEBUG +end: +#endif /* DEBUG */ + + stackDepth += ndefs; + return stackDepth; +} + +bool BytecodeParser::recordBytecode(uint32_t offset, + const OffsetAndDefIndex* offsetStack, + uint32_t stackDepth) { + MOZ_ASSERT(offset < script_->length()); + + Bytecode*& code = codeArray_[offset]; + if (!code) { + code = alloc().new_(alloc()); + if (!code || !code->captureOffsetStack(alloc(), offsetStack, stackDepth)) { + reportOOM(); + return false; + } + } else { + code->mergeOffsetStack(offsetStack, stackDepth); + } + + return true; +} + +bool BytecodeParser::addJump(uint32_t offset, uint32_t stackDepth, + const OffsetAndDefIndex* offsetStack, + jsbytecode* pc, JumpKind kind) { + if (!recordBytecode(offset, offsetStack, stackDepth)) { + return false; + } + +#ifdef DEBUG + uint32_t currentOffset = script_->pcToOffset(pc); + if (isStackDump) { + if (!codeArray_[offset]->addJump(currentOffset, kind)) { + reportOOM(); + return false; + } + } + + // If this is a backedge, assert we parsed the target JSOp::LoopHead. + MOZ_ASSERT_IF(offset < currentOffset, codeArray_[offset]->parsed); +#endif /* DEBUG */ + + return true; +} + +bool BytecodeParser::parse() { + MOZ_ASSERT(!codeArray_); + + uint32_t length = script_->length(); + codeArray_ = alloc().newArray(length); + + if (!codeArray_) { + reportOOM(); + return false; + } + + mozilla::PodZero(codeArray_, length); + + // Fill in stack depth and definitions at initial bytecode. + Bytecode* startcode = alloc().new_(alloc()); + if (!startcode) { + reportOOM(); + return false; + } + + // Fill in stack depth and definitions at initial bytecode. 
+ OffsetAndDefIndex* offsetStack = + alloc().newArray(maximumStackDepth()); + if (maximumStackDepth() && !offsetStack) { + reportOOM(); + return false; + } + + startcode->stackDepth = 0; + codeArray_[0] = startcode; + + for (uint32_t offset = 0, nextOffset = 0; offset < length; + offset = nextOffset) { + Bytecode* code = maybeCode(offset); + jsbytecode* pc = script_->offsetToPC(offset); + + // Next bytecode to analyze. + nextOffset = offset + GetBytecodeLength(pc); + + MOZ_ASSERT(*pc < JSOP_LIMIT); + JSOp op = JSOp(*pc); + + if (!code) { + // Haven't found a path by which this bytecode is reachable. + continue; + } + + // On a jump target, we reload the offsetStack saved for the current + // bytecode, as it contains either the original offset stack, or the + // merged offset stack. + if (BytecodeIsJumpTarget(op)) { + for (uint32_t n = 0; n < code->stackDepth; ++n) { + offsetStack[n] = code->offsetStack[n]; + } + } + + if (code->parsed) { + // No need to reparse. + continue; + } + + code->parsed = true; + + uint32_t stackDepth = simulateOp(op, offset, offsetStack, code->stackDepth); + +#ifdef DEBUG + if (isStackDump) { + if (!code->captureOffsetStackAfter(alloc(), offsetStack, stackDepth)) { + reportOOM(); + return false; + } + } +#endif /* DEBUG */ + + switch (op) { + case JSOp::TableSwitch: { + uint32_t defaultOffset = offset + GET_JUMP_OFFSET(pc); + jsbytecode* pc2 = pc + JUMP_OFFSET_LEN; + int32_t low = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + int32_t high = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + + if (!addJump(defaultOffset, stackDepth, offsetStack, pc, + JumpKind::SwitchDefault)) { + return false; + } + + uint32_t ncases = high - low + 1; + + for (uint32_t i = 0; i < ncases; i++) { + uint32_t targetOffset = script_->tableSwitchCaseOffset(pc, i); + if (targetOffset != defaultOffset) { + if (!addJump(targetOffset, stackDepth, offsetStack, pc, + JumpKind::SwitchCase)) { + return false; + } + } + } + break; + } + + case JSOp::Try: { + // 
Everything between a try and corresponding catch or finally is + // conditional. Note that there is no problem with code which is skipped + // by a thrown exception but is not caught by a later handler in the + // same function: no more code will execute, and it does not matter what + // is defined. + for (const TryNote& tn : script_->trynotes()) { + if (tn.start == offset + JSOpLength_Try) { + uint32_t catchOffset = tn.start + tn.length; + if (tn.kind() == TryNoteKind::Catch) { + if (!addJump(catchOffset, stackDepth, offsetStack, pc, + JumpKind::TryCatch)) { + return false; + } + } else if (tn.kind() == TryNoteKind::Finally) { + if (!addJump(catchOffset, stackDepth, offsetStack, pc, + JumpKind::TryFinally)) { + return false; + } + } + } + } + break; + } + + default: + break; + } + + // Check basic jump opcodes, which may or may not have a fallthrough. + if (IsJumpOpcode(op)) { + // Case instructions do not push the lvalue back when branching. + uint32_t newStackDepth = stackDepth; + if (op == JSOp::Case) { + newStackDepth--; + } + + uint32_t targetOffset = offset + GET_JUMP_OFFSET(pc); + if (!addJump(targetOffset, newStackDepth, offsetStack, pc, + JumpKind::Simple)) { + return false; + } + } + + // Handle any fallthrough from this opcode. 
+ if (BytecodeFallsThrough(op)) { + if (!recordBytecode(nextOffset, offsetStack, stackDepth)) { + return false; + } + } + } + + return true; +} + +#if defined(DEBUG) || defined(JS_JITSPEW) + +bool js::ReconstructStackDepth(JSContext* cx, JSScript* script, jsbytecode* pc, + uint32_t* depth, bool* reachablePC) { + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + *reachablePC = parser.isReachable(pc); + + if (*reachablePC) { + *depth = parser.stackDepthAtPC(pc); + } + + return true; +} + +static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc, + unsigned loc, bool lines, + const BytecodeParser* parser, Sprinter* sp); + +/* + * If pc != nullptr, include a prefix indicating whether the PC is at the + * current line. If showAll is true, include the source note type and the + * entry stack depth. + */ +static MOZ_MUST_USE bool DisassembleAtPC( + JSContext* cx, JSScript* scriptArg, bool lines, const jsbytecode* pc, + bool showAll, Sprinter* sp, + DisassembleSkeptically skeptically = DisassembleSkeptically::No) { + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + RootedScript script(cx, scriptArg); + mozilla::Maybe parser; + + if (skeptically == DisassembleSkeptically::No) { + parser.emplace(cx, allocScope.alloc(), script); + parser->setStackDump(); + if (!parser->parse()) { + return false; + } + } + + if (showAll) { + if (!sp->jsprintf("%s:%u\n", script->filename(), + unsigned(script->lineno()))) { + return false; + } + } + + if (pc != nullptr) { + if (!sp->put(" ")) { + return false; + } + } + if (showAll) { + if (!sp->put("sn stack ")) { + return false; + } + } + if (!sp->put("loc ")) { + return false; + } + if (lines) { + if (!sp->put("line")) { + return false; + } + } + if (!sp->put(" op\n")) { + return false; + } + + if (pc != nullptr) { + if (!sp->put(" ")) { + return false; + } + } + if (showAll) { + if (!sp->put("-- ----- ")) { + 
return false; + } + } + if (!sp->put("----- ")) { + return false; + } + if (lines) { + if (!sp->put("----")) { + return false; + } + } + if (!sp->put(" --\n")) { + return false; + } + + jsbytecode* next = script->code(); + jsbytecode* end = script->codeEnd(); + while (next < end) { + if (next == script->main()) { + if (!sp->put("main:\n")) { + return false; + } + } + if (pc != nullptr) { + if (!sp->put(pc == next ? "--> " : " ")) { + return false; + } + } + if (showAll) { + const SrcNote* sn = GetSrcNote(cx, script, next); + if (sn) { + MOZ_ASSERT(!sn->isTerminator()); + SrcNoteIterator iter(sn); + while (true) { + ++iter; + auto next = *iter; + if (!(!next->isTerminator() && next->delta() == 0)) { + break; + } + if (!sp->jsprintf("%s\n ", sn->name())) { + return false; + } + sn = *iter; + } + if (!sp->jsprintf("%s ", sn->name())) { + return false; + } + } else { + if (!sp->put(" ")) { + return false; + } + } + if (parser && parser->isReachable(next)) { + if (!sp->jsprintf("%05u ", parser->stackDepthAtPC(next))) { + return false; + } + } else { + if (!sp->put(" ")) { + return false; + } + } + } + unsigned len = Disassemble1(cx, script, next, script->pcToOffset(next), + lines, parser.ptrOr(nullptr), sp); + if (!len) { + return false; + } + + next += len; + } + + return true; +} + +bool js::Disassemble(JSContext* cx, HandleScript script, bool lines, + Sprinter* sp, DisassembleSkeptically skeptically) { + return DisassembleAtPC(cx, script, lines, nullptr, false, sp, skeptically); +} + +JS_FRIEND_API bool js::DumpPC(JSContext* cx, FILE* fp) { + gc::AutoSuppressGC suppressGC(cx); + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + ScriptFrameIter iter(cx); + if (iter.done()) { + fprintf(fp, "Empty stack.\n"); + return true; + } + RootedScript script(cx, iter.script()); + bool ok = DisassembleAtPC(cx, script, true, iter.pc(), false, &sprinter); + fprintf(fp, "%s", sprinter.string()); + return ok; +} + +JS_FRIEND_API bool js::DumpScript(JSContext* cx, 
JSScript* scriptArg, + FILE* fp) { + gc::AutoSuppressGC suppressGC(cx); + Sprinter sprinter(cx); + if (!sprinter.init()) { + return false; + } + RootedScript script(cx, scriptArg); + bool ok = Disassemble(cx, script, true, &sprinter); + fprintf(fp, "%s", sprinter.string()); + return ok; +} + +static UniqueChars ToDisassemblySource(JSContext* cx, HandleValue v) { + if (v.isString()) { + return QuoteString(cx, v.toString(), '"'); + } + + if (JS::RuntimeHeapIsBusy()) { + return DuplicateString(cx, ""); + } + + if (v.isObject()) { + JSObject& obj = v.toObject(); + + if (obj.is()) { + RootedFunction fun(cx, &obj.as()); + JSString* str = JS_DecompileFunction(cx, fun); + if (!str) { + return nullptr; + } + return QuoteString(cx, str); + } + + if (obj.is()) { + Rooted reobj(cx, &obj.as()); + JSString* source = RegExpObject::toString(cx, reobj); + if (!source) { + return nullptr; + } + return QuoteString(cx, source); + } + } + + JSString* str = ValueToSource(cx, v); + if (!str) { + return nullptr; + } + return QuoteString(cx, str); +} + +static bool ToDisassemblySource(JSContext* cx, HandleScope scope, + UniqueChars* bytes) { + UniqueChars source = JS_smprintf("%s {", ScopeKindString(scope->kind())); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + for (Rooted bi(cx, BindingIter(scope)); bi; bi++) { + UniqueChars nameBytes = AtomToPrintableString(cx, bi.name()); + if (!nameBytes) { + return false; + } + + source = JS_sprintf_append(std::move(source), "%s: ", nameBytes.get()); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + BindingLocation loc = bi.location(); + switch (loc.kind()) { + case BindingLocation::Kind::Global: + source = JS_sprintf_append(std::move(source), "global"); + break; + + case BindingLocation::Kind::Frame: + source = + JS_sprintf_append(std::move(source), "frame slot %u", loc.slot()); + break; + + case BindingLocation::Kind::Environment: + source = + JS_sprintf_append(std::move(source), "env slot %u", loc.slot()); + 
break; + + case BindingLocation::Kind::Argument: + source = + JS_sprintf_append(std::move(source), "arg slot %u", loc.slot()); + break; + + case BindingLocation::Kind::NamedLambdaCallee: + source = JS_sprintf_append(std::move(source), "named lambda callee"); + break; + + case BindingLocation::Kind::Import: + source = JS_sprintf_append(std::move(source), "import"); + break; + } + + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + if (!bi.isLast()) { + source = JS_sprintf_append(std::move(source), ", "); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + } + } + + source = JS_sprintf_append(std::move(source), "}"); + if (!source) { + ReportOutOfMemory(cx); + return false; + } + + *bytes = std::move(source); + return true; +} + +static bool DumpJumpOrigins(HandleScript script, jsbytecode* pc, + const BytecodeParser* parser, Sprinter* sp) { + bool called = false; + auto callback = [&script, &sp, &called](jsbytecode* pc, + BytecodeParser::JumpKind kind) { + if (!called) { + called = true; + if (!sp->put("\n# ")) { + return false; + } + } else { + if (!sp->put(", ")) { + return false; + } + } + + switch (kind) { + case BytecodeParser::JumpKind::Simple: + break; + + case BytecodeParser::JumpKind::SwitchCase: + if (!sp->put("switch-case ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::SwitchDefault: + if (!sp->put("switch-default ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::TryCatch: + if (!sp->put("try-catch ")) { + return false; + } + break; + + case BytecodeParser::JumpKind::TryFinally: + if (!sp->put("try-finally ")) { + return false; + } + break; + } + + if (!sp->jsprintf("from %s @ %05u", CodeName(JSOp(*pc)), + unsigned(script->pcToOffset(pc)))) { + return false; + } + + return true; + }; + if (!parser->forEachJumpOrigins(pc, callback)) { + return false; + } + if (called) { + if (!sp->put("\n")) { + return false; + } + } + + return true; +} + +static bool DecompileAtPCForStackDump( + JSContext* 
cx, HandleScript script, + const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp); + +static unsigned Disassemble1(JSContext* cx, HandleScript script, jsbytecode* pc, + unsigned loc, bool lines, + const BytecodeParser* parser, Sprinter* sp) { + if (parser && parser->isReachable(pc)) { + if (!DumpJumpOrigins(script, pc, parser, sp)) { + return 0; + } + } + + size_t before = sp->stringEnd() - sp->string(); + bool stackDumped = false; + auto dumpStack = [&cx, &script, &pc, &parser, &sp, &before, &stackDumped]() { + if (!parser) { + return true; + } + if (stackDumped) { + return true; + } + stackDumped = true; + + size_t after = sp->stringEnd() - sp->string(); + MOZ_ASSERT(after >= before); + + static const size_t stack_column = 40; + for (size_t i = after - before; i < stack_column - 1; i++) { + if (!sp->put(" ")) { + return false; + } + } + + if (!sp->put(" # ")) { + return false; + } + + if (!parser->isReachable(pc)) { + if (!sp->put("!!! UNREACHABLE !!!")) { + return false; + } + } else { + uint32_t depth = parser->stackDepthAfterPC(pc); + + for (uint32_t i = 0; i < depth; i++) { + if (i) { + if (!sp->put(" ")) { + return false; + } + } + + const OffsetAndDefIndex& offsetAndDefIndex = + parser->offsetForStackOperandAfterPC(script->pcToOffset(pc), i); + // This will decompile the stack for the same PC many times. + // We'll avoid optimizing it since this is a testing function + // and it won't be worth managing cached expression here. 
+ if (!DecompileAtPCForStackDump(cx, script, offsetAndDefIndex, sp)) { + return false; + } + } + } + + return true; + }; + + if (*pc >= JSOP_LIMIT) { + char numBuf1[12], numBuf2[12]; + SprintfLiteral(numBuf1, "%d", int(*pc)); + SprintfLiteral(numBuf2, "%d", JSOP_LIMIT); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BYTECODE_TOO_BIG, numBuf1, numBuf2); + return 0; + } + JSOp op = JSOp(*pc); + const JSCodeSpec& cs = CodeSpec(op); + const unsigned len = cs.length; + if (!sp->jsprintf("%05u:", loc)) { + return 0; + } + if (lines) { + if (!sp->jsprintf("%4u", PCToLineNumber(script, pc))) { + return 0; + } + } + if (!sp->jsprintf(" %s", CodeName(op))) { + return 0; + } + + int i; + switch (JOF_TYPE(cs.format)) { + case JOF_BYTE: + break; + + case JOF_JUMP: { + ptrdiff_t off = GET_JUMP_OFFSET(pc); + if (!sp->jsprintf(" %u (%+d)", unsigned(loc + int(off)), int(off))) { + return 0; + } + break; + } + + case JOF_SCOPE: { + RootedScope scope(cx, script->getScope(pc)); + UniqueChars bytes; + if (!ToDisassemblySource(cx, scope, &bytes)) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_ENVCOORD: { + RootedValue v(cx, StringValue(EnvironmentCoordinateNameSlow(script, pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + EnvironmentCoordinate ec(pc); + if (!sp->jsprintf(" %s (hops = %u, slot = %u)", bytes.get(), ec.hops(), + ec.slot())) { + return 0; + } + break; + } + + case JOF_ATOM: { + RootedValue v(cx, StringValue(script->getAtom(pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_DOUBLE: { + double d = GET_INLINE_VALUE(pc).toDouble(); + if (!sp->jsprintf(" %lf", d)) { + return 0; + } + break; + } + + case JOF_BIGINT: { + RootedValue v(cx, BigIntValue(script->getBigInt(pc))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + 
return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_OBJECT: { + JSObject* obj = script->getObject(pc); + { + RootedValue v(cx, ObjectValue(*obj)); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + } + break; + } + + case JOF_REGEXP: { + js::RegExpObject* obj = script->getRegExp(pc); + RootedValue v(cx, ObjectValue(*obj)); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s", bytes.get())) { + return 0; + } + break; + } + + case JOF_TABLESWITCH: { + int32_t i, low, high; + + ptrdiff_t off = GET_JUMP_OFFSET(pc); + jsbytecode* pc2 = pc + JUMP_OFFSET_LEN; + low = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + high = GET_JUMP_OFFSET(pc2); + pc2 += JUMP_OFFSET_LEN; + if (!sp->jsprintf(" defaultOffset %d low %d high %d", int(off), low, + high)) { + return 0; + } + + // Display stack dump before diplaying the offsets for each case. 
+ if (!dumpStack()) { + return 0; + } + + for (i = low; i <= high; i++) { + off = + script->tableSwitchCaseOffset(pc, i - low) - script->pcToOffset(pc); + if (!sp->jsprintf("\n\t%d: %d", i, int(off))) { + return 0; + } + } + break; + } + + case JOF_QARG: + if (!sp->jsprintf(" %u", GET_ARGNO(pc))) { + return 0; + } + break; + + case JOF_LOCAL: + if (!sp->jsprintf(" %u", GET_LOCALNO(pc))) { + return 0; + } + break; + + case JOF_GCTHING: + if (!sp->jsprintf(" %u", unsigned(GET_GCTHING_INDEX(pc)))) { + return 0; + } + break; + + case JOF_UINT32: + if (!sp->jsprintf(" %u", GET_UINT32(pc))) { + return 0; + } + break; + + case JOF_ICINDEX: + if (!sp->jsprintf(" (ic: %u)", GET_ICINDEX(pc))) { + return 0; + } + break; + + case JOF_LOOPHEAD: + if (!sp->jsprintf(" (ic: %u, depthHint: %u)", GET_ICINDEX(pc), + LoopHeadDepthHint(pc))) { + return 0; + } + break; + + case JOF_CLASS_CTOR: { + GCThingIndex atomIndex; + uint32_t classStartOffset = 0, classEndOffset = 0; + GetClassConstructorOperands(pc, &atomIndex, &classStartOffset, + &classEndOffset); + RootedValue v(cx, StringValue(script->getAtom(atomIndex))); + UniqueChars bytes = ToDisassemblySource(cx, v); + if (!bytes) { + return 0; + } + if (!sp->jsprintf(" %s (off: %u-%u)", bytes.get(), classStartOffset, + classEndOffset)) { + return 0; + } + break; + } + case JOF_TWO_UINT8: { + int one = (int)GET_UINT8(pc); + int two = (int)GET_UINT8(pc + 1); + + if (!sp->jsprintf(" %d", one)) { + return 0; + } + if (!sp->jsprintf(" %d", two)) { + return 0; + } + break; + } + + case JOF_ARGC: + case JOF_UINT16: + i = (int)GET_UINT16(pc); + goto print_int; + + case JOF_RESUMEINDEX: + case JOF_UINT24: + MOZ_ASSERT(len == 4); + i = (int)GET_UINT24(pc); + goto print_int; + + case JOF_UINT8: + i = GET_UINT8(pc); + goto print_int; + + case JOF_INT8: + i = GET_INT8(pc); + goto print_int; + + case JOF_INT32: + MOZ_ASSERT(op == JSOp::Int32); + i = GET_INT32(pc); + print_int: + if (!sp->jsprintf(" %d", i)) { + return 0; + } + break; + + default: { + 
char numBuf[12]; + SprintfLiteral(numBuf, "%x", cs.format); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_UNKNOWN_FORMAT, numBuf); + return 0; + } + } + + if (!dumpStack()) { + return 0; + } + + if (!sp->put("\n")) { + return 0; + } + return len; +} + +unsigned js::Disassemble1(JSContext* cx, JS::Handle script, + jsbytecode* pc, unsigned loc, bool lines, + Sprinter* sp) { + return Disassemble1(cx, script, pc, loc, lines, nullptr, sp); +} + +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + +namespace { +/* + * The expression decompiler is invoked by error handling code to produce a + * string representation of the erroring expression. As it's only a debugging + * tool, it only supports basic expressions. For anything complicated, it simply + * puts "(intermediate value)" into the error result. + * + * Here's the basic algorithm: + * + * 1. Find the stack location of the value whose expression we wish to + * decompile. The error handler can explicitly pass this as an + * argument. Otherwise, we search backwards down the stack for the offending + * value. + * + * 2. Instantiate and run a BytecodeParser for the current frame. This creates a + * stack of pcs parallel to the interpreter stack; given an interpreter stack + * location, the corresponding pc stack location contains the opcode that pushed + * the value in the interpreter. Now, with the result of step 1, we have the + * opcode responsible for pushing the value we want to decompile. + * + * 3. Pass the opcode to decompilePC. decompilePC is the main decompiler + * routine, responsible for a string representation of the expression that + * generated a certain stack location. decompilePC looks at one opcode and + * returns the JS source equivalent of that opcode. + * + * 4. Expressions can, of course, contain subexpressions. For example, the + * literals "4" and "5" are subexpressions of the addition operator in "4 + + * 5". 
If we need to decompile a subexpression, we call decompilePC (step 2) + * recursively on the operands' pcs. The result is a depth-first traversal of + * the expression tree. + * + */ +struct ExpressionDecompiler { + JSContext* cx; + RootedScript script; + const BytecodeParser& parser; + Sprinter sprinter; + +#if defined(DEBUG) || defined(JS_JITSPEW) + // Dedicated mode for stack dump. + // Generates an expression for stack dump, including internal state, + // and also disables special handling for self-hosted code. + bool isStackDump; +#endif + + ExpressionDecompiler(JSContext* cx, JSScript* script, + const BytecodeParser& parser) + : cx(cx), + script(cx, script), + parser(parser), + sprinter(cx) +#if defined(DEBUG) || defined(JS_JITSPEW) + , + isStackDump(false) +#endif + { + } + bool init(); + bool decompilePCForStackOperand(jsbytecode* pc, int i); + bool decompilePC(jsbytecode* pc, uint8_t defIndex); + bool decompilePC(const OffsetAndDefIndex& offsetAndDefIndex); + JSAtom* getArg(unsigned slot); + JSAtom* loadAtom(jsbytecode* pc); + bool quote(JSString* s, char quote); + bool write(const char* s); + bool write(JSString* str); + UniqueChars getOutput(); +#if defined(DEBUG) || defined(JS_JITSPEW) + void setStackDump() { isStackDump = true; } +#endif +}; + +bool ExpressionDecompiler::decompilePCForStackOperand(jsbytecode* pc, int i) { + return decompilePC(parser.offsetForStackOperand(script->pcToOffset(pc), i)); +} + +bool ExpressionDecompiler::decompilePC(jsbytecode* pc, uint8_t defIndex) { + MOZ_ASSERT(script->containsPC(pc)); + + JSOp op = (JSOp)*pc; + + if (const char* token = CodeToken[uint8_t(op)]) { + MOZ_ASSERT(defIndex == 0); + MOZ_ASSERT(CodeSpec(op).ndefs == 1); + + // Handle simple cases of binary and unary operators. + switch (CodeSpec(op).nuses) { + case 2: { + const SrcNote* sn = GetSrcNote(cx, script, pc); + const char* extra = + sn && sn->type() == SrcNoteType::AssignOp ? 
"=" : ""; + return write("(") && decompilePCForStackOperand(pc, -2) && write(" ") && + write(token) && write(extra) && write(" ") && + decompilePCForStackOperand(pc, -1) && write(")"); + break; + } + case 1: + return write("(") && write(token) && + decompilePCForStackOperand(pc, -1) && write(")"); + default: + break; + } + } + + switch (op) { + case JSOp::DelName: + return write("(delete ") && write(loadAtom(pc)) && write(")"); + + case JSOp::GetGName: + case JSOp::GetName: + case JSOp::GetIntrinsic: + return write(loadAtom(pc)); + case JSOp::GetArg: { + unsigned slot = GET_ARGNO(pc); + + // For self-hosted scripts that are called from non-self-hosted code, + // decompiling the parameter name in the self-hosted script is + // unhelpful. Decompile the argument name instead. + if (script->selfHosted() +#ifdef DEBUG + // For stack dump, argument name is not necessary. + && !isStackDump +#endif /* DEBUG */ + ) { + UniqueChars result; + if (!DecompileArgumentFromStack(cx, slot, &result)) { + return false; + } + + // Note that decompiling the argument in the parent frame might + // not succeed. + if (result) { + return write(result.get()); + } + + // If it fails, do not return parameter name and let the caller + // fallback. + return write("(intermediate value)"); + } + + JSAtom* atom = getArg(slot); + if (!atom) { + return false; + } + return write(atom); + } + case JSOp::GetLocal: { + JSAtom* atom = FrameSlotName(script, pc); + MOZ_ASSERT(atom); + return write(atom); + } + case JSOp::GetAliasedVar: { + JSAtom* atom = EnvironmentCoordinateNameSlow(script, pc); + MOZ_ASSERT(atom); + return write(atom); + } + + case JSOp::DelProp: + case JSOp::StrictDelProp: + case JSOp::GetProp: + case JSOp::GetBoundName: { + bool hasDelete = op == JSOp::DelProp || op == JSOp::StrictDelProp; + RootedAtom prop(cx, loadAtom(pc)); + MOZ_ASSERT(prop); + return (hasDelete ? write("(delete ") : true) && + decompilePCForStackOperand(pc, -1) && + (IsIdentifier(prop) + ? 
write(".") && quote(prop, '\0') + : write("[") && quote(prop, '\'') && write("]")) && + (hasDelete ? write(")") : true); + } + case JSOp::GetPropSuper: { + RootedAtom prop(cx, loadAtom(pc)); + return write("super.") && quote(prop, '\0'); + } + case JSOp::SetElem: + case JSOp::StrictSetElem: + // NOTE: We don't show the right hand side of the operation because + // it's used in error messages like: "a[0] is not readable". + // + // We could though. + return decompilePCForStackOperand(pc, -3) && write("[") && + decompilePCForStackOperand(pc, -2) && write("]"); + + case JSOp::DelElem: + case JSOp::StrictDelElem: + case JSOp::GetElem: { + bool hasDelete = (op == JSOp::DelElem || op == JSOp::StrictDelElem); + return (hasDelete ? write("(delete ") : true) && + decompilePCForStackOperand(pc, -2) && write("[") && + decompilePCForStackOperand(pc, -1) && write("]") && + (hasDelete ? write(")") : true); + } + + case JSOp::GetElemSuper: + return write("super[") && decompilePCForStackOperand(pc, -2) && + write("]"); + case JSOp::Null: + return write(js_null_str); + case JSOp::True: + return write(js_true_str); + case JSOp::False: + return write(js_false_str); + case JSOp::Zero: + case JSOp::One: + case JSOp::Int8: + case JSOp::Uint16: + case JSOp::Uint24: + case JSOp::Int32: + return sprinter.printf("%d", GetBytecodeInteger(pc)); + case JSOp::String: + return quote(loadAtom(pc), '"'); + case JSOp::Symbol: { + unsigned i = uint8_t(pc[1]); + MOZ_ASSERT(i < JS::WellKnownSymbolLimit); + if (i < JS::WellKnownSymbolLimit) { + return write(cx->names().wellKnownSymbolDescriptions()[i]); + } + break; + } + case JSOp::Undefined: + return write(js_undefined_str); + case JSOp::GlobalThis: + // |this| could convert to a very long object initialiser, so cite it by + // its keyword name. 
+ return write(js_this_str); + case JSOp::NewTarget: + return write("new.target"); + case JSOp::Call: + case JSOp::CallIgnoresRv: + case JSOp::CallIter: + case JSOp::FunCall: + case JSOp::FunApply: { + uint16_t argc = GET_ARGC(pc); + return decompilePCForStackOperand(pc, -int32_t(argc + 2)) && + write(argc ? "(...)" : "()"); + } + case JSOp::SpreadCall: + return decompilePCForStackOperand(pc, -3) && write("(...)"); + case JSOp::NewArray: + return write("[]"); + case JSOp::RegExp: { + Rooted obj(cx, &script->getObject(pc)->as()); + JSString* str = RegExpObject::toString(cx, obj); + if (!str) { + return false; + } + return write(str); + } + case JSOp::Object: { + JSObject* obj = script->getObject(pc); + RootedValue objv(cx, ObjectValue(*obj)); + JSString* str = ValueToSource(cx, objv); + if (!str) { + return false; + } + return write(str); + } + case JSOp::Void: + return write("(void ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::SuperCall: + if (GET_ARGC(pc) == 0) { + return write("super()"); + } + [[fallthrough]]; + case JSOp::SpreadSuperCall: + return write("super(...)"); + case JSOp::SuperFun: + return write("super"); + + case JSOp::Eval: + case JSOp::SpreadEval: + case JSOp::StrictEval: + case JSOp::StrictSpreadEval: + return write("eval(...)"); + + case JSOp::New: { + uint16_t argc = GET_ARGC(pc); + return write("(new ") && + decompilePCForStackOperand(pc, -int32_t(argc + 3)) && + write(argc ? "(...))" : "())"); + } + + case JSOp::SpreadNew: + return write("(new ") && decompilePCForStackOperand(pc, -4) && + write("(...))"); + + case JSOp::Typeof: + case JSOp::TypeofExpr: + return write("(typeof ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::InitElemArray: + return write("[...]"); + + case JSOp::InitElemInc: + if (defIndex == 0) { + return write("[...]"); + } + MOZ_ASSERT(defIndex == 1); +#ifdef DEBUG + // INDEX won't be be exposed to error message. 
+ if (isStackDump) { + return write("INDEX"); + } +#endif + break; + + case JSOp::ToNumeric: + return write("(tonumeric ") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::Inc: + return write("(inc ") && decompilePCForStackOperand(pc, -1) && write(")"); + + case JSOp::Dec: + return write("(dec ") && decompilePCForStackOperand(pc, -1) && write(")"); + + case JSOp::BigInt: +#if defined(DEBUG) || defined(JS_JITSPEW) + // BigInt::dump() only available in this configuration. + script->getBigInt(pc)->dump(sprinter); + return !sprinter.hadOutOfMemory(); +#else + return write("[bigint]"); +#endif + + case JSOp::BuiltinObject: { + auto kind = BuiltinObjectKind(GET_UINT8(pc)); + return write(BuiltinObjectName(kind)); + } + + default: + break; + } + +#ifdef DEBUG + if (isStackDump) { + // Special decompilation for stack dump. + switch (op) { + case JSOp::Arguments: + return write("arguments"); + + case JSOp::BindGName: + return write("GLOBAL"); + + case JSOp::BindName: + case JSOp::BindVar: + return write("ENV"); + + case JSOp::Callee: + return write("CALLEE"); + + case JSOp::EnvCallee: + return write("ENVCALLEE"); + + case JSOp::CallSiteObj: + return write("OBJ"); + + case JSOp::ClassConstructor: + case JSOp::DerivedConstructor: + return write("CONSTRUCTOR"); + + case JSOp::Double: + return sprinter.printf("%lf", GET_INLINE_VALUE(pc).toDouble()); + + case JSOp::Exception: + return write("EXCEPTION"); + + case JSOp::Finally: + if (defIndex == 0) { + return write("THROWING"); + } + MOZ_ASSERT(defIndex == 1); + return write("PC"); + + case JSOp::GImplicitThis: + case JSOp::FunctionThis: + case JSOp::ImplicitThis: + return write("THIS"); + + case JSOp::FunWithProto: + return write("FUN"); + + case JSOp::Generator: + return write("GENERATOR"); + + case JSOp::GetImport: + return write("VAL"); + + case JSOp::GetRval: + return write("RVAL"); + + case JSOp::Hole: + return write("HOLE"); + + case JSOp::IsGenClosing: + // For stack dump, defIndex == 0 is not used. 
+ MOZ_ASSERT(defIndex == 1); + return write("ISGENCLOSING"); + + case JSOp::IsNoIter: + // For stack dump, defIndex == 0 is not used. + MOZ_ASSERT(defIndex == 1); + return write("ISNOITER"); + + case JSOp::IsConstructing: + return write("JS_IS_CONSTRUCTING"); + + case JSOp::Iter: + return write("ITER"); + + case JSOp::Lambda: + case JSOp::LambdaArrow: + return write("FUN"); + + case JSOp::ToAsyncIter: + return write("ASYNCITER"); + + case JSOp::MoreIter: + // For stack dump, defIndex == 0 is not used. + MOZ_ASSERT(defIndex == 1); + return write("MOREITER"); + + case JSOp::MutateProto: + return write("SUCCEEDED"); + + case JSOp::NewInit: + case JSOp::NewObject: + case JSOp::ObjWithProto: + return write("OBJ"); + + case JSOp::OptimizeSpreadCall: + // For stack dump, defIndex == 0 is not used. + MOZ_ASSERT(defIndex == 1); + return write("OPTIMIZED"); + + case JSOp::Rest: + return write("REST"); + + case JSOp::Resume: + return write("RVAL"); + + case JSOp::SuperBase: + return write("HOMEOBJECTPROTO"); + + case JSOp::ToPropertyKey: + return write("TOPROPERTYKEY(") && decompilePCForStackOperand(pc, -1) && + write(")"); + case JSOp::ToString: + return write("TOSTRING(") && decompilePCForStackOperand(pc, -1) && + write(")"); + + case JSOp::Uninitialized: + return write("UNINITIALIZED"); + + case JSOp::InitialYield: + case JSOp::Await: + case JSOp::Yield: + // Printing "yield SOMETHING" is confusing since the operand doesn't + // match to the syntax, since the stack operand for "yield 10" is + // the result object, not 10. 
+ if (defIndex == 0) { + return write("RVAL"); + } + if (defIndex == 1) { + return write("GENERATOR"); + } + MOZ_ASSERT(defIndex == 2); + return write("RESUMEKIND"); + + case JSOp::ResumeKind: + return write("RESUMEKIND"); + + case JSOp::AsyncAwait: + case JSOp::AsyncResolve: + return write("PROMISE"); + + case JSOp::CheckPrivateField: + return write("HasPrivateField"); + + default: + break; + } + return write(""); + } +#endif /* DEBUG */ + + return write("(intermediate value)"); +} + +bool ExpressionDecompiler::decompilePC( + const OffsetAndDefIndex& offsetAndDefIndex) { + if (offsetAndDefIndex.isSpecial()) { +#ifdef DEBUG + if (isStackDump) { + if (offsetAndDefIndex.isMerged()) { + if (!write("merged<")) { + return false; + } + } else if (offsetAndDefIndex.isIgnored()) { + if (!write("ignored<")) { + return false; + } + } + + if (!decompilePC(script->offsetToPC(offsetAndDefIndex.specialOffset()), + offsetAndDefIndex.specialDefIndex())) { + return false; + } + + if (!write(">")) { + return false; + } + + return true; + } +#endif /* DEBUG */ + return write("(intermediate value)"); + } + + return decompilePC(script->offsetToPC(offsetAndDefIndex.offset()), + offsetAndDefIndex.defIndex()); +} + +bool ExpressionDecompiler::init() { + cx->check(script); + return sprinter.init(); +} + +bool ExpressionDecompiler::write(const char* s) { return sprinter.put(s); } + +bool ExpressionDecompiler::write(JSString* str) { + if (str == cx->names().dotThis) { + return write("this"); + } + return sprinter.putString(str); +} + +bool ExpressionDecompiler::quote(JSString* s, char quote) { + return QuoteString(&sprinter, s, quote); +} + +JSAtom* ExpressionDecompiler::loadAtom(jsbytecode* pc) { + return script->getAtom(pc); +} + +JSAtom* ExpressionDecompiler::getArg(unsigned slot) { + MOZ_ASSERT(script->isFunction()); + MOZ_ASSERT(slot < script->numArgs()); + + for (PositionalFormalParameterIter fi(script); fi; fi++) { + if (fi.argumentSlot() == slot) { + if (!fi.isDestructured()) { + 
return fi.name(); + } + + // Destructured arguments have no single binding name. + static const char destructuredParam[] = "(destructured parameter)"; + return Atomize(cx, destructuredParam, strlen(destructuredParam)); + } + } + + MOZ_CRASH("No binding"); +} + +UniqueChars ExpressionDecompiler::getOutput() { + ptrdiff_t len = sprinter.stringEnd() - sprinter.stringAt(0); + auto res = cx->make_pod_array(len + 1); + if (!res) { + return nullptr; + } + js_memcpy(res.get(), sprinter.stringAt(0), len); + res[len] = 0; + return res; +} + +} // anonymous namespace + +#if defined(DEBUG) || defined(JS_JITSPEW) +static bool DecompileAtPCForStackDump( + JSContext* cx, HandleScript script, + const OffsetAndDefIndex& offsetAndDefIndex, Sprinter* sp) { + // The expression decompiler asserts the script is in the current realm. + AutoRealm ar(cx, script); + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + parser.setStackDump(); + if (!parser.parse()) { + return false; + } + + ExpressionDecompiler ed(cx, script, parser); + ed.setStackDump(); + if (!ed.init()) { + return false; + } + + if (!ed.decompilePC(offsetAndDefIndex)) { + return false; + } + + UniqueChars result = ed.getOutput(); + if (!result) { + return false; + } + + return sp->put(result.get()); +} +#endif /* defined(DEBUG) || defined(JS_JITSPEW) */ + +static bool FindStartPC(JSContext* cx, const FrameIter& iter, + const BytecodeParser& parser, int spindex, + int skipStackHits, const Value& v, jsbytecode** valuepc, + uint8_t* defIndex) { + jsbytecode* current = *valuepc; + *valuepc = nullptr; + *defIndex = 0; + + if (spindex < 0 && spindex + int(parser.stackDepthAtPC(current)) < 0) { + spindex = JSDVG_SEARCH_STACK; + } + + if (spindex == JSDVG_SEARCH_STACK) { + size_t index = iter.numFrameSlots(); + + // The decompiler may be called from inside functions that are not + // called from script, but via the C++ API directly, such as + // Invoke. 
In that case, the youngest script frame may have a + // completely unrelated pc and stack depth, so we give up. + if (index < size_t(parser.stackDepthAtPC(current))) { + return true; + } + + // We search from fp->sp to base to find the most recently calculated + // value matching v under assumption that it is the value that caused + // the exception. + int stackHits = 0; + Value s; + do { + if (!index) { + return true; + } + s = iter.frameSlotValue(--index); + } while (s != v || stackHits++ != skipStackHits); + + // If the current PC has fewer values on the stack than the index we are + // looking for, the blamed value must be one pushed by the current + // bytecode (e.g. JSOp::MoreIter), so restore *valuepc. + if (index < size_t(parser.stackDepthAtPC(current))) { + *valuepc = parser.pcForStackOperand(current, index, defIndex); + } else { + *valuepc = current; + *defIndex = index - size_t(parser.stackDepthAtPC(current)); + } + } else { + *valuepc = parser.pcForStackOperand(current, spindex, defIndex); + } + return true; +} + +static bool DecompileExpressionFromStack(JSContext* cx, int spindex, + int skipStackHits, HandleValue v, + UniqueChars* res) { + MOZ_ASSERT(spindex < 0 || spindex == JSDVG_IGNORE_STACK || + spindex == JSDVG_SEARCH_STACK); + + *res = nullptr; + + /* + * Give up if we need deterministic behavior for differential testing. + * IonMonkey doesn't use InterpreterFrames and this ensures we get the same + * error messages. + */ + if (js::SupportDifferentialTesting()) { + return true; + } + + if (spindex == JSDVG_IGNORE_STACK) { + return true; + } + + FrameIter frameIter(cx); + + if (frameIter.done() || !frameIter.hasScript() || + frameIter.realm() != cx->realm()) { + return true; + } + + /* + * FIXME: Fall back if iter.isIon(), since the stack snapshot may be for the + * previous pc (see bug 831120). 
+ */ + if (frameIter.isIon()) { + return true; + } + + RootedScript script(cx, frameIter.script()); + jsbytecode* valuepc = frameIter.pc(); + + MOZ_ASSERT(script->containsPC(valuepc)); + + // Give up if in prologue. + if (valuepc < script->main()) { + return true; + } + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), frameIter.script()); + if (!parser.parse()) { + return false; + } + + uint8_t defIndex; + if (!FindStartPC(cx, frameIter, parser, spindex, skipStackHits, v, &valuepc, + &defIndex)) { + return false; + } + if (!valuepc) { + return true; + } + + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + if (!ed.decompilePC(valuepc, defIndex)) { + return false; + } + + *res = ed.getOutput(); + return *res != nullptr; +} + +UniqueChars js::DecompileValueGenerator(JSContext* cx, int spindex, + HandleValue v, HandleString fallbackArg, + int skipStackHits) { + RootedString fallback(cx, fallbackArg); + { + UniqueChars result; + if (!DecompileExpressionFromStack(cx, spindex, skipStackHits, v, &result)) { + return nullptr; + } + if (result && strcmp(result.get(), "(intermediate value)")) { + return result; + } + } + if (!fallback) { + if (v.isUndefined()) { + return DuplicateString( + cx, js_undefined_str); // Prevent users from seeing "(void 0)" + } + fallback = ValueToSource(cx, v); + if (!fallback) { + return nullptr; + } + } + + return StringToNewUTF8CharsZ(cx, *fallback); +} + +static bool DecompileArgumentFromStack(JSContext* cx, int formalIndex, + UniqueChars* res) { + MOZ_ASSERT(formalIndex >= 0); + + *res = nullptr; + + /* See note in DecompileExpressionFromStack. */ + if (js::SupportDifferentialTesting()) { + return true; + } + + /* + * Settle on the nearest script frame, which should be the builtin that + * called the intrinsic. 
+ */ + FrameIter frameIter(cx); + MOZ_ASSERT(!frameIter.done()); + MOZ_ASSERT(frameIter.script()->selfHosted()); + + /* + * Get the second-to-top frame, the non-self-hosted caller of the builtin + * that called the intrinsic. + */ + ++frameIter; + if (frameIter.done() || !frameIter.hasScript() || + frameIter.script()->selfHosted() || frameIter.realm() != cx->realm()) { + return true; + } + + RootedScript script(cx, frameIter.script()); + jsbytecode* current = frameIter.pc(); + + MOZ_ASSERT(script->containsPC(current)); + + if (current < script->main()) { + return true; + } + + /* Don't handle getters, setters or calls from fun.call/fun.apply. */ + JSOp op = JSOp(*current); + if (op != JSOp::Call && op != JSOp::CallIgnoresRv && op != JSOp::New) { + return true; + } + + if (static_cast(formalIndex) >= GET_ARGC(current)) { + return true; + } + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + bool pushedNewTarget = op == JSOp::New; + int formalStackIndex = parser.stackDepthAtPC(current) - GET_ARGC(current) - + pushedNewTarget + formalIndex; + MOZ_ASSERT(formalStackIndex >= 0); + if (uint32_t(formalStackIndex) >= parser.stackDepthAtPC(current)) { + return true; + } + + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + if (!ed.decompilePCForStackOperand(current, formalStackIndex)) { + return false; + } + + *res = ed.getOutput(); + return *res != nullptr; +} + +JSString* js::DecompileArgument(JSContext* cx, int formalIndex, HandleValue v) { + { + UniqueChars result; + if (!DecompileArgumentFromStack(cx, formalIndex, &result)) { + return nullptr; + } + if (result && strcmp(result.get(), "(intermediate value)")) { + JS::ConstUTF8CharsZ utf8chars(result.get(), strlen(result.get())); + return NewStringCopyUTF8Z(cx, utf8chars); + } + } + if (v.isUndefined()) { + return cx->names().undefined; // Prevent users from seeing "(void 0)" + } 
+ + return ValueToSource(cx, v); +} + +extern bool js::IsValidBytecodeOffset(JSContext* cx, JSScript* script, + size_t offset) { + // This could be faster (by following jump instructions if the target + // is <= offset). + for (BytecodeRange r(cx, script); !r.empty(); r.popFront()) { + size_t here = r.frontOffset(); + if (here >= offset) { + return here == offset; + } + } + return false; +} + +/* + * There are three possible PCCount profiling states: + * + * 1. None: Neither scripts nor the runtime have count information. + * 2. Profile: Active scripts have count information, the runtime does not. + * 3. Query: Scripts do not have count information, the runtime does. + * + * When starting to profile scripts, counting begins immediately, with all JIT + * code discarded and recompiled with counts as necessary. Active interpreter + * frames will not begin profiling until they begin executing another script + * (via a call or return). + * + * The below API functions manage transitions to new states, according + * to the table below. 
+ * + * Old State + * ------------------------- + * Function None Profile Query + * -------- + * StartPCCountProfiling Profile Profile Profile + * StopPCCountProfiling None Query Query + * PurgePCCounts None None None + */ + +static void ReleaseScriptCounts(JSRuntime* rt) { + MOZ_ASSERT(rt->scriptAndCountsVector); + + js_delete(rt->scriptAndCountsVector.ref()); + rt->scriptAndCountsVector = nullptr; +} + +JS_FRIEND_API void js::StartPCCountProfiling(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (rt->profilingScripts) { + return; + } + + if (rt->scriptAndCountsVector) { + ReleaseScriptCounts(rt); + } + + ReleaseAllJITCode(rt->defaultFreeOp()); + + rt->profilingScripts = true; +} + +JS_FRIEND_API void js::StopPCCountProfiling(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->profilingScripts) { + return; + } + MOZ_ASSERT(!rt->scriptAndCountsVector); + + ReleaseAllJITCode(rt->defaultFreeOp()); + + auto* vec = cx->new_>( + cx, ScriptAndCountsVector()); + if (!vec) { + return; + } + + for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next()) { + for (auto base = zone->cellIter(); !base.done(); base.next()) { + if (base->hasScriptCounts() && base->hasJitScript()) { + if (!vec->append(base->asJSScript())) { + return; + } + } + } + } + + rt->profilingScripts = false; + rt->scriptAndCountsVector = vec; +} + +JS_FRIEND_API void js::PurgePCCounts(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector) { + return; + } + MOZ_ASSERT(!rt->profilingScripts); + + ReleaseScriptCounts(rt); +} + +JS_FRIEND_API size_t js::GetPCCountScriptCount(JSContext* cx) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector) { + return 0; + } + + return rt->scriptAndCountsVector->length(); +} + +static MOZ_MUST_USE bool JSONStringProperty(Sprinter& sp, JSONPrinter& json, + const char* name, JSString* str) { + json.beginStringProperty(name); + if (!JSONQuoteString(&sp, str)) { + return false; + } + json.endStringProperty(); + 
return true; +} + +JS_FRIEND_API JSString* js::GetPCCountScriptSummary(JSContext* cx, + size_t index) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector || + index >= rt->scriptAndCountsVector->length()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); + return nullptr; + } + + const ScriptAndCounts& sac = (*rt->scriptAndCountsVector)[index]; + RootedScript script(cx, sac.script); + + Sprinter sp(cx); + if (!sp.init()) { + return nullptr; + } + + JSONPrinter json(sp, false); + + json.beginObject(); + + RootedString filename(cx, NewStringCopyZ(cx, script->filename())); + if (!filename) { + return nullptr; + } + if (!JSONStringProperty(sp, json, "file", filename)) { + return nullptr; + } + json.property("line", script->lineno()); + + if (JSFunction* fun = script->function()) { + if (JSAtom* atom = fun->displayAtom()) { + if (!JSONStringProperty(sp, json, "name", atom)) { + return nullptr; + } + } + } + + uint64_t total = 0; + + AllBytecodesIterable iter(script); + for (BytecodeLocation loc : iter) { + if (const PCCounts* counts = sac.maybeGetPCCounts(loc.toRawBytecode())) { + total += counts->numExec(); + } + } + + json.beginObjectProperty("totals"); + + json.property(PCCounts::numExecName, total); + + uint64_t ionActivity = 0; + jit::IonScriptCounts* ionCounts = sac.getIonCounts(); + while (ionCounts) { + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + ionActivity += ionCounts->block(i).hitCount(); + } + ionCounts = ionCounts->previous(); + } + if (ionActivity) { + json.property("ion", ionActivity); + } + + json.endObject(); + + json.endObject(); + + if (sp.hadOutOfMemory()) { + return nullptr; + } + + return NewStringCopyZ(cx, sp.string()); +} + +static bool GetPCCountJSON(JSContext* cx, const ScriptAndCounts& sac, + Sprinter& sp) { + JSONPrinter json(sp, false); + + RootedScript script(cx, sac.script); + + LifoAllocScope allocScope(&cx->tempLifoAlloc()); + BytecodeParser parser(cx, 
allocScope.alloc(), script); + if (!parser.parse()) { + return false; + } + + json.beginObject(); + + JSString* str = JS_DecompileScript(cx, script); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "text", str)) { + return false; + } + + json.property("line", script->lineno()); + + json.beginListProperty("opcodes"); + + uint64_t hits = 0; + for (BytecodeRangeWithPosition range(cx, script); !range.empty(); + range.popFront()) { + jsbytecode* pc = range.frontPC(); + size_t offset = script->pcToOffset(pc); + JSOp op = JSOp(*pc); + + // If the current instruction is a jump target, + // then update the number of hits. + if (const PCCounts* counts = sac.maybeGetPCCounts(pc)) { + hits = counts->numExec(); + } + + json.beginObject(); + + json.property("id", offset); + json.property("line", range.frontLineNumber()); + json.property("name", CodeName(op)); + + { + ExpressionDecompiler ed(cx, script, parser); + if (!ed.init()) { + return false; + } + // defIndex passed here is not used. + if (!ed.decompilePC(pc, /* defIndex = */ 0)) { + return false; + } + UniqueChars text = ed.getOutput(); + if (!text) { + return false; + } + + JS::ConstUTF8CharsZ utf8chars(text.get(), strlen(text.get())); + JSString* str = NewStringCopyUTF8Z(cx, utf8chars); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "text", str)) { + return false; + } + } + + json.beginObjectProperty("counts"); + if (hits > 0) { + json.property(PCCounts::numExecName, hits); + } + json.endObject(); + + json.endObject(); + + // If the current instruction has thrown, + // then decrement the hit counts with the number of throws. 
+ if (const PCCounts* counts = sac.maybeGetThrowCounts(pc)) { + hits -= counts->numExec(); + } + } + + json.endList(); + + if (jit::IonScriptCounts* ionCounts = sac.getIonCounts()) { + json.beginListProperty("ion"); + + while (ionCounts) { + json.beginList(); + for (size_t i = 0; i < ionCounts->numBlocks(); i++) { + const jit::IonBlockCounts& block = ionCounts->block(i); + + json.beginObject(); + json.property("id", block.id()); + json.property("offset", block.offset()); + + json.beginListProperty("successors"); + for (size_t j = 0; j < block.numSuccessors(); j++) { + json.value(block.successor(j)); + } + json.endList(); + + json.property("hits", block.hitCount()); + + JSString* str = NewStringCopyZ(cx, block.code()); + if (!str) { + return false; + } + + if (!JSONStringProperty(sp, json, "code", str)) { + return false; + } + + json.endObject(); + } + json.endList(); + + ionCounts = ionCounts->previous(); + } + + json.endList(); + } + + json.endObject(); + + return true; +} + +JS_FRIEND_API JSString* js::GetPCCountScriptContents(JSContext* cx, + size_t index) { + JSRuntime* rt = cx->runtime(); + + if (!rt->scriptAndCountsVector || + index >= rt->scriptAndCountsVector->length()) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); + return nullptr; + } + + const ScriptAndCounts& sac = (*rt->scriptAndCountsVector)[index]; + JSScript* script = sac.script; + + Sprinter sp(cx); + if (!sp.init()) { + return nullptr; + } + + { + AutoRealm ar(cx, &script->global()); + if (!GetPCCountJSON(cx, sac, sp)) { + return nullptr; + } + } + + if (sp.hadOutOfMemory()) { + return nullptr; + } + + return NewStringCopyZ(cx, sp.string()); +} + +struct CollectedScripts { + MutableHandle scripts; + bool ok = true; + + explicit CollectedScripts(MutableHandle scripts) + : scripts(scripts) {} + + static void consider(JSRuntime* rt, void* data, BaseScript* script, + const JS::AutoRequireNoGC& nogc) { + auto self = static_cast(data); + if (!script->filename()) 
{ + return; + } + if (!self->scripts.append(script->asJSScript())) { + self->ok = false; + } + } +}; + +static bool GenerateLcovInfo(JSContext* cx, JS::Realm* realm, + GenericPrinter& out) { + AutoRealmUnchecked ar(cx, realm); + + // Collect the list of scripts which are part of the current realm. + + MOZ_RELEASE_ASSERT( + coverage::IsLCovEnabled(), + "Coverage must be enabled for process before generating LCov info"); + + // Hold the scripts that we have already flushed, to avoid flushing them + // twice. + using JSScriptSet = GCHashSet; + Rooted scriptsDone(cx, JSScriptSet(cx)); + + Rooted queue(cx, ScriptVector(cx)); + + { + CollectedScripts result(&queue); + IterateScripts(cx, realm, &result, &CollectedScripts::consider); + if (!result.ok) { + return false; + } + } + + if (queue.length() == 0) { + return true; + } + + // Ensure the LCovRealm exists to collect info into. + coverage::LCovRealm* lcovRealm = realm->lcovRealm(); + if (!lcovRealm) { + return false; + } + + // Collect code coverage info for one realm. + do { + RootedScript script(cx, queue.popCopy()); + RootedFunction fun(cx); + + JSScriptSet::AddPtr entry = scriptsDone.lookupForAdd(script); + if (entry) { + continue; + } + + if (!coverage::CollectScriptCoverage(script, false)) { + return false; + } + + script->resetScriptCounts(); + + if (!scriptsDone.add(entry, script)) { + return false; + } + + if (!script->isTopLevel()) { + continue; + } + + // Iterate from the last to the first object in order to have + // the functions them visited in the opposite order when popping + // elements from the stack of remaining scripts, such that the + // functions are more-less listed with increasing line numbers. 
+ auto gcthings = script->gcthings(); + for (JS::GCCellPtr gcThing : mozilla::Reversed(gcthings)) { + if (!gcThing.is()) { + continue; + } + JSObject* obj = &gcThing.as(); + + if (!obj->is()) { + continue; + } + fun = &obj->as(); + + // Ignore asm.js functions + if (!fun->isInterpreted()) { + continue; + } + + // Queue the script in the list of script associated to the + // current source. + JSScript* childScript = JSFunction::getOrCreateScript(cx, fun); + if (!childScript || !queue.append(childScript)) { + return false; + } + } + } while (!queue.empty()); + + bool isEmpty = true; + lcovRealm->exportInto(out, &isEmpty); + if (out.hadOutOfMemory()) { + return false; + } + + return true; +} + +JS_FRIEND_API UniqueChars js::GetCodeCoverageSummaryAll(JSContext* cx, + size_t* length) { + Sprinter out(cx); + if (!out.init()) { + return nullptr; + } + + for (RealmsIter realm(cx->runtime()); !realm.done(); realm.next()) { + if (!GenerateLcovInfo(cx, realm, out)) { + JS_ReportOutOfMemory(cx); + return nullptr; + } + } + + *length = out.getOffset(); + return js::DuplicateString(cx, out.string(), *length); +} + +JS_FRIEND_API UniqueChars js::GetCodeCoverageSummary(JSContext* cx, + size_t* length) { + Sprinter out(cx); + if (!out.init()) { + return nullptr; + } + + if (!GenerateLcovInfo(cx, cx->realm(), out)) { + JS_ReportOutOfMemory(cx); + return nullptr; + } + + *length = out.getOffset(); + return js::DuplicateString(cx, out.string(), *length); +} diff --git a/js/src/vm/BytecodeUtil.h b/js/src/vm/BytecodeUtil.h new file mode 100644 index 0000000000..375abda471 --- /dev/null +++ b/js/src/vm/BytecodeUtil.h @@ -0,0 +1,723 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#ifndef vm_BytecodeUtil_h +#define vm_BytecodeUtil_h + +/* + * JS bytecode definitions. + */ + +#include "mozilla/Assertions.h" +#include "mozilla/Attributes.h" +#include "mozilla/EndianUtils.h" + +#include +#include +#include + +#include "jstypes.h" +#include "NamespaceImports.h" + +#include "js/TypeDecls.h" +#include "js/UniquePtr.h" +#include "js/Utility.h" +#include "js/Value.h" +#include "vm/BytecodeFormatFlags.h" // JOF_* +#include "vm/GeneratorResumeKind.h" +#include "vm/Opcodes.h" +#include "vm/SharedStencil.h" // js::GCThingIndex +#include "vm/ThrowMsgKind.h" // ThrowMsgKind, ThrowCondition + +namespace js { +class Sprinter; +} // namespace js + +/* + * JS operation bytecodes. + */ +enum class JSOp : uint8_t { +#define ENUMERATE_OPCODE(op, ...) op, + FOR_EACH_OPCODE(ENUMERATE_OPCODE) +#undef ENUMERATE_OPCODE +}; + +/* Shorthand for type from format. */ + +static inline uint32_t JOF_TYPE(uint32_t fmt) { return fmt & JOF_TYPEMASK; } + +/* Shorthand for mode from format. */ + +static inline uint32_t JOF_MODE(uint32_t fmt) { return fmt & JOF_MODEMASK; } + +/* + * Immediate operand getters, setters, and bounds. + */ + +static MOZ_ALWAYS_INLINE uint8_t GET_UINT8(jsbytecode* pc) { + return uint8_t(pc[1]); +} + +static MOZ_ALWAYS_INLINE void SET_UINT8(jsbytecode* pc, uint8_t u) { + pc[1] = jsbytecode(u); +} + +/* Common uint16_t immediate format helpers. */ + +static inline jsbytecode UINT16_HI(uint16_t i) { return jsbytecode(i >> 8); } + +static inline jsbytecode UINT16_LO(uint16_t i) { return jsbytecode(i); } + +static MOZ_ALWAYS_INLINE uint16_t GET_UINT16(const jsbytecode* pc) { + uint16_t result; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1); + return result; +} + +static MOZ_ALWAYS_INLINE void SET_UINT16(jsbytecode* pc, uint16_t i) { + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &i, 1); +} + +static const unsigned UINT16_LIMIT = 1 << 16; + +/* Helpers for accessing the offsets of jump opcodes. 
*/ +static const unsigned JUMP_OFFSET_LEN = 4; +static const int32_t JUMP_OFFSET_MIN = INT32_MIN; +static const int32_t JUMP_OFFSET_MAX = INT32_MAX; + +static MOZ_ALWAYS_INLINE uint32_t GET_UINT24(const jsbytecode* pc) { +#if MOZ_LITTLE_ENDIAN() + // Do a single 32-bit load (for opcode and operand), then shift off the + // opcode. + uint32_t result; + memcpy(&result, pc, 4); + return result >> 8; +#else + return uint32_t((pc[3] << 16) | (pc[2] << 8) | pc[1]); +#endif +} + +static MOZ_ALWAYS_INLINE void SET_UINT24(jsbytecode* pc, uint32_t i) { + MOZ_ASSERT(i < (1 << 24)); + +#if MOZ_LITTLE_ENDIAN() + memcpy(pc + 1, &i, 3); +#else + pc[1] = jsbytecode(i); + pc[2] = jsbytecode(i >> 8); + pc[3] = jsbytecode(i >> 16); +#endif +} + +static MOZ_ALWAYS_INLINE int8_t GET_INT8(const jsbytecode* pc) { + return int8_t(pc[1]); +} + +static MOZ_ALWAYS_INLINE uint32_t GET_UINT32(const jsbytecode* pc) { + uint32_t result; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&result, pc + 1, 1); + return result; +} + +static MOZ_ALWAYS_INLINE void SET_UINT32(jsbytecode* pc, uint32_t u) { + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &u, 1); +} + +static MOZ_ALWAYS_INLINE JS::Value GET_INLINE_VALUE(const jsbytecode* pc) { + uint64_t raw; + mozilla::NativeEndian::copyAndSwapFromLittleEndian(&raw, pc + 1, 1); + return JS::Value::fromRawBits(raw); +} + +static MOZ_ALWAYS_INLINE void SET_INLINE_VALUE(jsbytecode* pc, + const JS::Value& v) { + uint64_t raw = v.asRawBits(); + mozilla::NativeEndian::copyAndSwapToLittleEndian(pc + 1, &raw, 1); +} + +static MOZ_ALWAYS_INLINE int32_t GET_INT32(const jsbytecode* pc) { + return static_cast(GET_UINT32(pc)); +} + +static MOZ_ALWAYS_INLINE void SET_INT32(jsbytecode* pc, int32_t i) { + SET_UINT32(pc, static_cast(i)); +} + +static MOZ_ALWAYS_INLINE int32_t GET_JUMP_OFFSET(jsbytecode* pc) { + return GET_INT32(pc); +} + +static MOZ_ALWAYS_INLINE void SET_JUMP_OFFSET(jsbytecode* pc, int32_t off) { + SET_INT32(pc, off); +} + +static const 
unsigned GCTHING_INDEX_LEN = 4; + +static MOZ_ALWAYS_INLINE js::GCThingIndex GET_GCTHING_INDEX( + const jsbytecode* pc) { + return js::GCThingIndex(GET_UINT32(pc)); +} + +static MOZ_ALWAYS_INLINE void SET_GCTHING_INDEX(jsbytecode* pc, + js::GCThingIndex index) { + SET_UINT32(pc, index.index); +} + +// Index limit is determined by SrcNote::FourByteOffsetFlag, see +// frontend/BytecodeEmitter.h. +static const unsigned INDEX_LIMIT_LOG2 = 31; +static const uint32_t INDEX_LIMIT = uint32_t(1) << INDEX_LIMIT_LOG2; + +static inline jsbytecode ARGC_HI(uint16_t argc) { return UINT16_HI(argc); } + +static inline jsbytecode ARGC_LO(uint16_t argc) { return UINT16_LO(argc); } + +static inline uint16_t GET_ARGC(const jsbytecode* pc) { return GET_UINT16(pc); } + +static const unsigned ARGC_LIMIT = UINT16_LIMIT; + +static inline uint16_t GET_ARGNO(const jsbytecode* pc) { + return GET_UINT16(pc); +} + +static inline void SET_ARGNO(jsbytecode* pc, uint16_t argno) { + SET_UINT16(pc, argno); +} + +static const unsigned ARGNO_LEN = 2; +static const unsigned ARGNO_LIMIT = UINT16_LIMIT; + +static inline uint32_t GET_LOCALNO(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_LOCALNO(jsbytecode* pc, uint32_t varno) { + SET_UINT24(pc, varno); +} + +static const unsigned LOCALNO_LEN = 3; +static const unsigned LOCALNO_BITS = 24; +static const uint32_t LOCALNO_LIMIT = 1 << LOCALNO_BITS; + +static inline uint32_t GET_RESUMEINDEX(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_RESUMEINDEX(jsbytecode* pc, uint32_t resumeIndex) { + SET_UINT24(pc, resumeIndex); +} + +static const unsigned ICINDEX_LEN = 4; + +static inline uint32_t GET_ICINDEX(const jsbytecode* pc) { + return GET_UINT32(pc); +} + +static inline void SET_ICINDEX(jsbytecode* pc, uint32_t icIndex) { + SET_UINT32(pc, icIndex); +} + +static inline unsigned LoopHeadDepthHint(jsbytecode* pc) { + MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead); + return GET_UINT8(pc + 4); +} + +static inline 
void SetLoopHeadDepthHint(jsbytecode* pc, unsigned loopDepth) { + MOZ_ASSERT(JSOp(*pc) == JSOp::LoopHead); + uint8_t data = std::min(loopDepth, unsigned(UINT8_MAX)); + SET_UINT8(pc + 4, data); +} + +static inline bool IsBackedgePC(jsbytecode* pc) { + switch (JSOp(*pc)) { + case JSOp::Goto: + case JSOp::IfNe: + return GET_JUMP_OFFSET(pc) < 0; + default: + return false; + } +} + +static inline bool IsBackedgeForLoopHead(jsbytecode* pc, jsbytecode* loopHead) { + MOZ_ASSERT(JSOp(*loopHead) == JSOp::LoopHead); + return IsBackedgePC(pc) && pc + GET_JUMP_OFFSET(pc) == loopHead; +} + +static inline void SetClassConstructorOperands(jsbytecode* pc, + js::GCThingIndex atomIndex, + uint32_t sourceStart, + uint32_t sourceEnd) { + MOZ_ASSERT(JSOp(*pc) == JSOp::ClassConstructor || + JSOp(*pc) == JSOp::DerivedConstructor); + SET_GCTHING_INDEX(pc, atomIndex); + SET_UINT32(pc + 4, sourceStart); + SET_UINT32(pc + 8, sourceEnd); +} + +static inline void GetClassConstructorOperands(jsbytecode* pc, + js::GCThingIndex* atomIndex, + uint32_t* sourceStart, + uint32_t* sourceEnd) { + MOZ_ASSERT(JSOp(*pc) == JSOp::ClassConstructor || + JSOp(*pc) == JSOp::DerivedConstructor); + *atomIndex = GET_GCTHING_INDEX(pc); + *sourceStart = GET_UINT32(pc + 4); + *sourceEnd = GET_UINT32(pc + 8); +} + +/* + * Describes the 'hops' component of a JOF_ENVCOORD opcode. + * + * Note: this component is only 8 bits wide, limiting the maximum number of + * scopes between a use and def to roughly 255. This is a pretty small limit but + * note that SpiderMonkey's recursive descent parser can only parse about this + * many functions before hitting the C-stack recursion limit so this shouldn't + * be a significant limitation in practice. 
+ */ + +static inline uint8_t GET_ENVCOORD_HOPS(jsbytecode* pc) { + return GET_UINT8(pc); +} + +static inline void SET_ENVCOORD_HOPS(jsbytecode* pc, uint8_t hops) { + SET_UINT8(pc, hops); +} + +static const unsigned ENVCOORD_HOPS_LEN = 1; +static const unsigned ENVCOORD_HOPS_BITS = 8; +static const unsigned ENVCOORD_HOPS_LIMIT = 1 << ENVCOORD_HOPS_BITS; + +/* Describes the 'slot' component of a JOF_ENVCOORD opcode. */ +static inline uint32_t GET_ENVCOORD_SLOT(const jsbytecode* pc) { + return GET_UINT24(pc); +} + +static inline void SET_ENVCOORD_SLOT(jsbytecode* pc, uint32_t slot) { + SET_UINT24(pc, slot); +} + +static const unsigned ENVCOORD_SLOT_LEN = 3; +static const unsigned ENVCOORD_SLOT_BITS = 24; +static const uint32_t ENVCOORD_SLOT_LIMIT = 1 << ENVCOORD_SLOT_BITS; + +struct JSCodeSpec { + uint8_t length; /* length including opcode byte */ + int8_t nuses; /* arity, -1 if variadic */ + int8_t ndefs; /* number of stack results */ + uint32_t format; /* immediate operand format */ + + uint32_t type() const { return JOF_TYPE(format); } +}; + +namespace js { + +extern const JSCodeSpec CodeSpecTable[]; + +inline const JSCodeSpec& CodeSpec(JSOp op) { + return CodeSpecTable[uint8_t(op)]; +} + +extern const char* const CodeNameTable[]; + +inline const char* CodeName(JSOp op) { return CodeNameTable[uint8_t(op)]; } + +/* Shorthand for type from opcode. */ + +static inline uint32_t JOF_OPTYPE(JSOp op) { + return JOF_TYPE(CodeSpec(op).format); +} + +static inline bool IsJumpOpcode(JSOp op) { return JOF_OPTYPE(op) == JOF_JUMP; } + +static inline bool BytecodeFallsThrough(JSOp op) { + // Note: + // * JSOp::Yield/JSOp::Await is considered to fall through, like JSOp::Call. + // * JSOp::Gosub falls through indirectly, after executing a 'finally'. 
+ switch (op) { + case JSOp::Goto: + case JSOp::Default: + case JSOp::Return: + case JSOp::RetRval: + case JSOp::Retsub: + case JSOp::FinalYieldRval: + case JSOp::Throw: + case JSOp::ThrowMsg: + case JSOp::ThrowSetConst: + case JSOp::TableSwitch: + return false; + default: + return true; + } +} + +static inline bool BytecodeIsJumpTarget(JSOp op) { + switch (op) { + case JSOp::JumpTarget: + case JSOp::LoopHead: + case JSOp::AfterYield: + return true; + default: + return false; + } +} + +MOZ_ALWAYS_INLINE unsigned StackUses(jsbytecode* pc) { + JSOp op = JSOp(*pc); + int nuses = CodeSpec(op).nuses; + if (nuses >= 0) { + return nuses; + } + + MOZ_ASSERT(nuses == -1); + switch (op) { + case JSOp::PopN: + return GET_UINT16(pc); + case JSOp::New: + case JSOp::SuperCall: + return 2 + GET_ARGC(pc) + 1; + default: + /* stack: fun, this, [argc arguments] */ + MOZ_ASSERT(op == JSOp::Call || op == JSOp::CallIgnoresRv || + op == JSOp::Eval || op == JSOp::CallIter || + op == JSOp::StrictEval || op == JSOp::FunCall || + op == JSOp::FunApply); + return 2 + GET_ARGC(pc); + } +} + +MOZ_ALWAYS_INLINE unsigned StackDefs(jsbytecode* pc) { + int ndefs = CodeSpec(JSOp(*pc)).ndefs; + MOZ_ASSERT(ndefs >= 0); + return ndefs; +} + +#if defined(DEBUG) || defined(JS_JITSPEW) +/* + * Given bytecode address pc in script's main program code, compute the operand + * stack depth just before (JSOp) *pc executes. If *pc is not reachable, return + * false. + */ +extern bool ReconstructStackDepth(JSContext* cx, JSScript* script, + jsbytecode* pc, uint32_t* depth, + bool* reachablePC); +#endif + +} /* namespace js */ + +#define JSDVG_IGNORE_STACK 0 +#define JSDVG_SEARCH_STACK 1 + +namespace js { + +/* + * Find the source expression that resulted in v, and return a newly allocated + * C-string containing it. Fall back on v's string conversion (fallback) if we + * can't find the bytecode that generated and pushed v on the operand stack. 
+ * + * Search the current stack frame if spindex is JSDVG_SEARCH_STACK. Don't + * look for v on the stack if spindex is JSDVG_IGNORE_STACK. Otherwise, + * spindex is the negative index of v, measured from cx->fp->sp, or from a + * lower frame's sp if cx->fp is native. + * + * The optional argument skipStackHits can be used to skip a hit in the stack + * frame. This can be useful in self-hosted code that wants to report value + * errors containing decompiled values that are useful for the user, instead of + * values used internally by the self-hosted code. + * + * The caller must call JS_free on the result after a successful call. + */ +UniqueChars DecompileValueGenerator(JSContext* cx, int spindex, HandleValue v, + HandleString fallback, + int skipStackHits = 0); + +/* + * Decompile the formal argument at formalIndex in the nearest non-builtin + * stack frame, falling back with converting v to source. + */ +JSString* DecompileArgument(JSContext* cx, int formalIndex, HandleValue v); + +static inline unsigned GetOpLength(JSOp op) { + MOZ_ASSERT(uint8_t(op) < JSOP_LIMIT); + MOZ_ASSERT(CodeSpec(op).length > 0); + return CodeSpec(op).length; +} + +static inline unsigned GetBytecodeLength(const jsbytecode* pc) { + JSOp op = (JSOp)*pc; + return GetOpLength(op); +} + +static inline bool BytecodeIsPopped(jsbytecode* pc) { + jsbytecode* next = pc + GetBytecodeLength(pc); + return JSOp(*next) == JSOp::Pop; +} + +static inline bool BytecodeFlowsToBitop(jsbytecode* pc) { + // Look for simple bytecode for integer conversions like (x | 0) or (x & -1). 
+ jsbytecode* next = pc + GetBytecodeLength(pc); + if (JSOp(*next) == JSOp::BitOr || JSOp(*next) == JSOp::BitAnd) { + return true; + } + if (JSOp(*next) == JSOp::Int8 && GET_INT8(next) == -1) { + next += GetBytecodeLength(next); + if (JSOp(*next) == JSOp::BitAnd) { + return true; + } + return false; + } + if (JSOp(*next) == JSOp::One) { + next += GetBytecodeLength(next); + if (JSOp(*next) == JSOp::Neg) { + next += GetBytecodeLength(next); + if (JSOp(*next) == JSOp::BitAnd) { + return true; + } + } + return false; + } + if (JSOp(*next) == JSOp::Zero) { + next += GetBytecodeLength(next); + if (JSOp(*next) == JSOp::BitOr) { + return true; + } + return false; + } + return false; +} + +extern bool IsValidBytecodeOffset(JSContext* cx, JSScript* script, + size_t offset); + +inline bool IsArgOp(JSOp op) { return JOF_OPTYPE(op) == JOF_QARG; } + +inline bool IsLocalOp(JSOp op) { return JOF_OPTYPE(op) == JOF_LOCAL; } + +inline bool IsAliasedVarOp(JSOp op) { return JOF_OPTYPE(op) == JOF_ENVCOORD; } + +inline bool IsGlobalOp(JSOp op) { return CodeSpec(op).format & JOF_GNAME; } + +inline bool IsPropertySetOp(JSOp op) { + return CodeSpec(op).format & JOF_PROPSET; +} + +inline bool IsPropertyInitOp(JSOp op) { + return CodeSpec(op).format & JOF_PROPINIT; +} + +inline bool IsLooseEqualityOp(JSOp op) { + return op == JSOp::Eq || op == JSOp::Ne; +} + +inline bool IsStrictEqualityOp(JSOp op) { + return op == JSOp::StrictEq || op == JSOp::StrictNe; +} + +inline bool IsEqualityOp(JSOp op) { + return IsLooseEqualityOp(op) || IsStrictEqualityOp(op); +} + +inline bool IsRelationalOp(JSOp op) { + return op == JSOp::Lt || op == JSOp::Le || op == JSOp::Gt || op == JSOp::Ge; +} + +inline bool IsCheckStrictOp(JSOp op) { + return CodeSpec(op).format & JOF_CHECKSTRICT; +} + +inline bool IsNameOp(JSOp op) { return CodeSpec(op).format & JOF_NAME; } + +#ifdef DEBUG +inline bool IsCheckSloppyOp(JSOp op) { + return CodeSpec(op).format & JOF_CHECKSLOPPY; +} +#endif + +inline bool IsAtomOp(JSOp op) { 
return JOF_OPTYPE(op) == JOF_ATOM; } + +inline bool IsGetPropOp(JSOp op) { return op == JSOp::GetProp; } + +inline bool IsGetPropPC(const jsbytecode* pc) { return IsGetPropOp(JSOp(*pc)); } + +inline bool IsHiddenInitOp(JSOp op) { + return op == JSOp::InitHiddenProp || op == JSOp::InitHiddenElem || + op == JSOp::InitHiddenPropGetter || op == JSOp::InitHiddenElemGetter || + op == JSOp::InitHiddenPropSetter || op == JSOp::InitHiddenElemSetter || + op == JSOp::InitLockedElem; +} + +inline bool IsStrictSetPC(jsbytecode* pc) { + JSOp op = JSOp(*pc); + return op == JSOp::StrictSetProp || op == JSOp::StrictSetName || + op == JSOp::StrictSetGName || op == JSOp::StrictSetElem; +} + +inline bool IsSetPropOp(JSOp op) { + return op == JSOp::SetProp || op == JSOp::StrictSetProp || + op == JSOp::SetName || op == JSOp::StrictSetName || + op == JSOp::SetGName || op == JSOp::StrictSetGName; +} + +inline bool IsSetPropPC(const jsbytecode* pc) { return IsSetPropOp(JSOp(*pc)); } + +inline bool IsGetElemOp(JSOp op) { return op == JSOp::GetElem; } + +inline bool IsGetElemPC(const jsbytecode* pc) { return IsGetElemOp(JSOp(*pc)); } + +inline bool IsSetElemOp(JSOp op) { + return op == JSOp::SetElem || op == JSOp::StrictSetElem; +} + +inline bool IsSetElemPC(const jsbytecode* pc) { return IsSetElemOp(JSOp(*pc)); } + +inline bool IsElemPC(const jsbytecode* pc) { + return CodeSpec(JSOp(*pc)).format & JOF_ELEM; +} + +inline bool IsInvokeOp(JSOp op) { return CodeSpec(op).format & JOF_INVOKE; } + +inline bool IsInvokePC(jsbytecode* pc) { return IsInvokeOp(JSOp(*pc)); } + +inline bool IsStrictEvalPC(jsbytecode* pc) { + JSOp op = JSOp(*pc); + return op == JSOp::StrictEval || op == JSOp::StrictSpreadEval; +} + +inline bool IsConstructOp(JSOp op) { + return CodeSpec(op).format & JOF_CONSTRUCT; +} +inline bool IsConstructPC(const jsbytecode* pc) { + return IsConstructOp(JSOp(*pc)); +} + +inline bool IsSpreadOp(JSOp op) { return CodeSpec(op).format & JOF_SPREAD; } + +inline bool IsSpreadPC(const 
jsbytecode* pc) { return IsSpreadOp(JSOp(*pc)); } + +static inline int32_t GetBytecodeInteger(jsbytecode* pc) { + switch (JSOp(*pc)) { + case JSOp::Zero: + return 0; + case JSOp::One: + return 1; + case JSOp::Uint16: + return GET_UINT16(pc); + case JSOp::Uint24: + return GET_UINT24(pc); + case JSOp::Int8: + return GET_INT8(pc); + case JSOp::Int32: + return GET_INT32(pc); + default: + MOZ_CRASH("Bad op"); + } +} + +inline bool BytecodeOpHasIC(JSOp op) { return CodeSpec(op).format & JOF_IC; } + +inline void GetCheckPrivateFieldOperands(jsbytecode* pc, + ThrowCondition* throwCondition, + ThrowMsgKind* throwKind) { + static_assert(sizeof(ThrowCondition) == sizeof(uint8_t)); + static_assert(sizeof(ThrowMsgKind) == sizeof(uint8_t)); + + MOZ_ASSERT(JSOp(*pc) == JSOp::CheckPrivateField); + uint8_t throwConditionByte = GET_UINT8(pc); + uint8_t throwKindByte = GET_UINT8(pc + 1); + + *throwCondition = static_cast(throwConditionByte); + *throwKind = static_cast(throwKindByte); + + MOZ_ASSERT(*throwCondition == ThrowCondition::ThrowHas || + *throwCondition == ThrowCondition::ThrowHasNot || + *throwCondition == ThrowCondition::NoThrow); + + MOZ_ASSERT(*throwKind == ThrowMsgKind::PrivateDoubleInit || + *throwKind == ThrowMsgKind::MissingPrivateOnGet || + *throwKind == ThrowMsgKind::MissingPrivateOnSet); +} + +// Return true iff the combination of the ThrowCondition and hasOwn result +// will throw an exception. +static inline bool CheckPrivateFieldWillThrow(ThrowCondition condition, + bool hasOwn) { + if ((condition == ThrowCondition::ThrowHasNot && !hasOwn) || + (condition == ThrowCondition::ThrowHas && hasOwn)) { + // Met a throw condition. + return true; + } + + return false; +} + +/* + * Counts accumulated for a single opcode in a script. The counts tracked vary + * between opcodes, and this structure ensures that counts are accessed in a + * coherent fashion. + */ +class PCCounts { + /* + * Offset of the pc inside the script. 
This fields is used to lookup opcode + * which have annotations. + */ + size_t pcOffset_; + + /* + * Record the number of execution of one instruction, or the number of + * throws executed. + */ + uint64_t numExec_; + + public: + explicit PCCounts(size_t off) : pcOffset_(off), numExec_(0) {} + + size_t pcOffset() const { return pcOffset_; } + + // Used for sorting and searching. + bool operator<(const PCCounts& rhs) const { + return pcOffset_ < rhs.pcOffset_; + } + + uint64_t& numExec() { return numExec_; } + uint64_t numExec() const { return numExec_; } + + static const char numExecName[]; +}; + +static inline jsbytecode* GetNextPc(jsbytecode* pc) { + return pc + GetBytecodeLength(pc); +} + +inline GeneratorResumeKind IntToResumeKind(int32_t value) { + MOZ_ASSERT(uint32_t(value) <= uint32_t(GeneratorResumeKind::Return)); + return static_cast(value); +} + +inline GeneratorResumeKind ResumeKindFromPC(jsbytecode* pc) { + MOZ_ASSERT(JSOp(*pc) == JSOp::ResumeKind); + return IntToResumeKind(GET_UINT8(pc)); +} + +#if defined(DEBUG) || defined(JS_JITSPEW) + +enum class DisassembleSkeptically { No, Yes }; + +/* + * Disassemblers, for debugging only. + */ +extern MOZ_MUST_USE bool Disassemble( + JSContext* cx, JS::Handle script, bool lines, Sprinter* sp, + DisassembleSkeptically skeptically = DisassembleSkeptically::No); + +unsigned Disassemble1(JSContext* cx, JS::Handle script, + jsbytecode* pc, unsigned loc, bool lines, Sprinter* sp); + +#endif + +extern MOZ_MUST_USE bool DumpRealmPCCounts(JSContext* cx); + +} // namespace js + +#endif /* vm_BytecodeUtil_h */ diff --git a/js/src/vm/Caches-inl.h b/js/src/vm/Caches-inl.h new file mode 100644 index 0000000000..eb65085cd5 --- /dev/null +++ b/js/src/vm/Caches-inl.h @@ -0,0 +1,86 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_Caches_inl_h +#define vm_Caches_inl_h + +#include "vm/Caches.h" + +#include + +#include "gc/Allocator.h" +#include "gc/GCProbes.h" +#include "vm/Probes.h" +#include "vm/Realm.h" + +#include "vm/JSObject-inl.h" + +namespace js { + +inline bool NewObjectCache::lookupProto(const JSClass* clasp, JSObject* proto, + gc::AllocKind kind, + EntryIndex* pentry) { + MOZ_ASSERT(!proto->is()); + return lookup(clasp, proto, kind, pentry); +} + +inline bool NewObjectCache::lookupGlobal(const JSClass* clasp, + GlobalObject* global, + gc::AllocKind kind, + EntryIndex* pentry) { + return lookup(clasp, global, kind, pentry); +} + +inline void NewObjectCache::fillGlobal(EntryIndex entry, const JSClass* clasp, + GlobalObject* global, gc::AllocKind kind, + NativeObject* obj) { + // MOZ_ASSERT(global == obj->getGlobal()); + return fill(entry, clasp, global, kind, obj); +} + +inline NativeObject* NewObjectCache::newObjectFromHit(JSContext* cx, + EntryIndex entryIndex, + gc::InitialHeap heap) { + MOZ_ASSERT(unsigned(entryIndex) < std::size(entries)); + Entry* entry = &entries[entryIndex]; + + NativeObject* templateObj = + reinterpret_cast(&entry->templateObject); + + ObjectGroup* group = templateObj->group(); + + // If we did the lookup based on the proto we might have a group/object from a + // different (same-compartment) realm, so we have to do a realm check. 
+ if (group->realm() != cx->realm()) { + return nullptr; + } + + if (cx->runtime()->gc.upcomingZealousGC()) { + return nullptr; + } + + NativeObject* obj = static_cast(AllocateObject( + cx, entry->kind, /* nDynamicSlots = */ 0, heap, group->clasp())); + if (!obj) { + return nullptr; + } + + copyCachedToObject(obj, templateObj, entry->kind); + + if (group->clasp()->shouldDelayMetadataBuilder()) { + cx->realm()->setObjectPendingMetadata(cx, obj); + } else { + obj = static_cast(SetNewObjectMetadata(cx, obj)); + } + + probes::CreateObject(cx, obj); + gc::gcprobes::CreateObject(obj); + return obj; +} + +} /* namespace js */ + +#endif /* vm_Caches_inl_h */ diff --git a/js/src/vm/Caches.cpp b/js/src/vm/Caches.cpp new file mode 100644 index 0000000000..027056efff --- /dev/null +++ b/js/src/vm/Caches.cpp @@ -0,0 +1,23 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/Caches-inl.h" + +#include "mozilla/PodOperations.h" + +using namespace js; + +using mozilla::PodZero; + +void NewObjectCache::clearNurseryObjects(JSRuntime* rt) { + for (auto& e : entries) { + NativeObject* obj = reinterpret_cast(&e.templateObject); + if (IsInsideNursery(e.key) || rt->gc.nursery().isInside(obj->slots_) || + rt->gc.nursery().isInside(obj->elements_)) { + PodZero(&e); + } + } +} diff --git a/js/src/vm/Caches.h b/js/src/vm/Caches.h new file mode 100644 index 0000000000..8f03a24aaa --- /dev/null +++ b/js/src/vm/Caches.h @@ -0,0 +1,294 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_Caches_h +#define vm_Caches_h + +#include +#include + +#include "frontend/SourceNotes.h" // SrcNote +#include "gc/Tracer.h" +#include "js/RootingAPI.h" +#include "js/TypeDecls.h" +#include "js/UniquePtr.h" +#include "util/Memory.h" +#include "vm/ArrayObject.h" +#include "vm/JSAtom.h" +#include "vm/JSObject.h" +#include "vm/JSScript.h" +#include "vm/NativeObject.h" + +namespace js { + +/* + * GetSrcNote cache to avoid O(n^2) growth in finding a source note for a + * given pc in a script. We use the script->code pointer to tag the cache, + * instead of the script address itself, so that source notes are always found + * by offset from the bytecode with which they were generated. + */ +struct GSNCache { + typedef HashMap, + SystemAllocPolicy> + Map; + + jsbytecode* code; + Map map; + + GSNCache() : code(nullptr) {} + + void purge(); +}; + +struct EvalCacheEntry { + JSLinearString* str; + JSScript* script; + JSScript* callerScript; + jsbytecode* pc; + + // We sweep this cache before a nursery collection to remove entries with + // string keys in the nursery. + // + // The entire cache is purged on a major GC, so we don't need to sweep it + // then. + bool needsSweep() { return !str->isTenured(); } +}; + +struct EvalCacheLookup { + explicit EvalCacheLookup(JSContext* cx) : str(cx), callerScript(cx) {} + RootedLinearString str; + RootedScript callerScript; + MOZ_INIT_OUTSIDE_CTOR jsbytecode* pc; +}; + +struct EvalCacheHashPolicy { + using Lookup = EvalCacheLookup; + + static HashNumber hash(const Lookup& l); + static bool match(const EvalCacheEntry& entry, const EvalCacheLookup& l); +}; + +typedef GCHashSet + EvalCache; + +/* + * Cache for speeding up repetitive creation of objects in the VM. + * When an object is created which matches the criteria in the 'key' section + * below, an entry is filled with the resulting object. 
+ */ +class NewObjectCache { + /* Statically asserted to be equal to sizeof(JSObject_Slots16) */ + static const unsigned MAX_OBJ_SIZE = 4 * sizeof(void*) + 16 * sizeof(Value); + + static void staticAsserts() { + static_assert(NewObjectCache::MAX_OBJ_SIZE == sizeof(JSObject_Slots16)); + static_assert(gc::AllocKind::OBJECT_LAST == + gc::AllocKind::OBJECT16_BACKGROUND); + } + + struct Entry { + /* Class of the constructed object. */ + const JSClass* clasp; + + /* + * Key with one of three possible values: + * + * - Global for the object. The object must have a standard class for + * which the global's prototype can be determined, and the object's + * parent will be the global. + * + * - Prototype for the object (cannot be global). The object's parent + * will be the prototype's parent. + * + * - Type for the object. The object's parent will be the type's + * prototype's parent. + */ + gc::Cell* key; + + /* Allocation kind for the constructed object. */ + gc::AllocKind kind; + + /* Number of bytes to copy from the template object. */ + uint32_t nbytes; + + /* + * Template object to copy from, with the initial values of fields, + * fixed slots (undefined) and private data (nullptr). + */ + char templateObject[MAX_OBJ_SIZE]; + }; + + using EntryArray = Entry[41]; // TODO: reconsider size; + EntryArray entries; + + public: + using EntryIndex = int; + + NewObjectCache() + : entries{} // zeroes out the array + {} + + void purge() { + new (&entries) EntryArray{}; // zeroes out the array + } + + /* Remove any cached items keyed on moved objects. */ + void clearNurseryObjects(JSRuntime* rt); + + /* + * Get the entry index for the given lookup, return whether there was a hit + * on an existing entry. 
+ */ + inline bool lookupProto(const JSClass* clasp, JSObject* proto, + gc::AllocKind kind, EntryIndex* pentry); + inline bool lookupGlobal(const JSClass* clasp, js::GlobalObject* global, + gc::AllocKind kind, EntryIndex* pentry); + + bool lookupGroup(js::ObjectGroup* group, gc::AllocKind kind, + EntryIndex* pentry) { + return lookup(group->clasp(), group, kind, pentry); + } + + /* + * Return a new object from a cache hit produced by a lookup method, or + * nullptr if returning the object could possibly trigger GC (does not + * indicate failure). + */ + inline NativeObject* newObjectFromHit(JSContext* cx, EntryIndex entry, + js::gc::InitialHeap heap); + + /* Fill an entry after a cache miss. */ + void fillProto(EntryIndex entry, const JSClass* clasp, js::TaggedProto proto, + gc::AllocKind kind, NativeObject* obj); + + inline void fillGlobal(EntryIndex entry, const JSClass* clasp, + js::GlobalObject* global, gc::AllocKind kind, + NativeObject* obj); + + void fillGroup(EntryIndex entry, js::ObjectGroup* group, gc::AllocKind kind, + NativeObject* obj) { + MOZ_ASSERT(obj->group() == group); + return fill(entry, group->clasp(), group, kind, obj); + } + + /* Invalidate any entries which might produce an object with shape/proto. */ + void invalidateEntriesForShape(JSContext* cx, HandleShape shape, + HandleObject proto); + + private: + EntryIndex makeIndex(const JSClass* clasp, gc::Cell* key, + gc::AllocKind kind) { + uintptr_t hash = (uintptr_t(clasp) ^ uintptr_t(key)) + size_t(kind); + return hash % std::size(entries); + } + + bool lookup(const JSClass* clasp, gc::Cell* key, gc::AllocKind kind, + EntryIndex* pentry) { + *pentry = makeIndex(clasp, key, kind); + Entry* entry = &entries[*pentry]; + + // N.B. Lookups with the same clasp/key but different kinds map to + // different entries. 
+ return entry->clasp == clasp && entry->key == key; + } + + void fill(EntryIndex entry_, const JSClass* clasp, gc::Cell* key, + gc::AllocKind kind, NativeObject* obj) { + MOZ_ASSERT(unsigned(entry_) < std::size(entries)); + MOZ_ASSERT(entry_ == makeIndex(clasp, key, kind)); + Entry* entry = &entries[entry_]; + + MOZ_ASSERT(!obj->hasDynamicSlots()); + MOZ_ASSERT(obj->hasEmptyElements() || obj->is()); + + entry->clasp = clasp; + entry->key = key; + entry->kind = kind; + + entry->nbytes = gc::Arena::thingSize(kind); + js_memcpy(&entry->templateObject, obj, entry->nbytes); + } + + static void copyCachedToObject(NativeObject* dst, NativeObject* src, + gc::AllocKind kind) { + js_memcpy(dst, src, gc::Arena::thingSize(kind)); + + // Initialize with barriers + dst->initGroup(src->group()); + dst->initShape(src->shape()); + } +}; + +// Cache for AtomizeString, mapping JSLinearString* to the corresponding +// JSAtom*. Also used by nursery GC to de-duplicate strings to atoms. +// Purged on minor and major GC. +class StringToAtomCache { + using Map = HashMap, + SystemAllocPolicy>; + Map map_; + + public: + // Don't use the cache for short strings. Hashing them is less expensive. + static constexpr size_t MinStringLength = 30; + + JSAtom* lookup(JSLinearString* s) { + MOZ_ASSERT(!s->isAtom()); + if (!s->inStringToAtomCache()) { + MOZ_ASSERT(!map_.lookup(s)); + return nullptr; + } + + MOZ_ASSERT(s->length() >= MinStringLength); + + auto p = map_.lookup(s); + JSAtom* atom = p ? 
p->value() : nullptr; + MOZ_ASSERT_IF(atom, EqualStrings(s, atom)); + return atom; + } + + void maybePut(JSLinearString* s, JSAtom* atom) { + MOZ_ASSERT(!s->isAtom()); + if (s->length() < MinStringLength) { + return; + } + if (!map_.putNew(s, atom)) { + return; + } + s->setInStringToAtomCache(); + } + + void purge() { map_.clearAndCompact(); } +}; + +class RuntimeCaches { + public: + js::GSNCache gsnCache; + js::NewObjectCache newObjectCache; + js::UncompressedSourceCache uncompressedSourceCache; + js::EvalCache evalCache; + js::StringToAtomCache stringToAtomCache; + + void purgeForMinorGC(JSRuntime* rt) { + newObjectCache.clearNurseryObjects(rt); + evalCache.sweep(); + } + + void purgeForCompaction() { + newObjectCache.purge(); + evalCache.clear(); + stringToAtomCache.purge(); + } + + void purge() { + purgeForCompaction(); + gsnCache.purge(); + uncompressedSourceCache.purge(); + } +}; + +} // namespace js + +#endif /* vm_Caches_h */ diff --git a/js/src/vm/CallNonGenericMethod.cpp b/js/src/vm/CallNonGenericMethod.cpp new file mode 100644 index 0000000000..512c84f94d --- /dev/null +++ b/js/src/vm/CallNonGenericMethod.cpp @@ -0,0 +1,35 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "js/CallNonGenericMethod.h" + +#include "proxy/Proxy.h" +#include "vm/JSFunction.h" +#include "vm/JSObject.h" +#include "vm/ProxyObject.h" +#include "vm/SelfHosting.h" + +using namespace js; + +bool JS::detail::CallMethodIfWrapped(JSContext* cx, IsAcceptableThis test, + NativeImpl impl, const CallArgs& args) { + HandleValue thisv = args.thisv(); + MOZ_ASSERT(!test(thisv)); + + if (thisv.isObject()) { + JSObject& thisObj = args.thisv().toObject(); + if (thisObj.is()) { + return Proxy::nativeCall(cx, test, impl, args); + } + } + + if (IsCallSelfHostedNonGenericMethod(impl)) { + return ReportIncompatibleSelfHostedMethod(cx, args); + } + + ReportIncompatible(cx, args); + return false; +} diff --git a/js/src/vm/CharacterEncoding.cpp b/js/src/vm/CharacterEncoding.cpp new file mode 100644 index 0000000000..3eb98f6854 --- /dev/null +++ b/js/src/vm/CharacterEncoding.cpp @@ -0,0 +1,697 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. 
*/ + +#include "js/CharacterEncoding.h" + +#include "mozilla/Latin1.h" +#include "mozilla/Range.h" +#include "mozilla/Span.h" +#include "mozilla/Sprintf.h" +#include "mozilla/TextUtils.h" +#include "mozilla/Utf8.h" + +#include +#include + +#include "js/friend/ErrorMessages.h" // js::GetErrorMessage, JSMSG_* +#include "util/StringBuffer.h" +#include "util/Unicode.h" // unicode::REPLACEMENT_CHARACTER +#include "vm/JSContext.h" + +using mozilla::AsChars; +using mozilla::AsciiValidUpTo; +using mozilla::AsWritableChars; +using mozilla::ConvertLatin1toUtf8Partial; +using mozilla::ConvertUtf16toUtf8Partial; +using mozilla::IsAscii; +using mozilla::IsUtf8Latin1; +using mozilla::LossyConvertUtf16toLatin1; +using mozilla::Span; +using mozilla::Tie; +using mozilla::Tuple; +using mozilla::Unused; +using mozilla::Utf8Unit; + +using JS::Latin1CharsZ; +using JS::TwoByteCharsZ; +using JS::UTF8Chars; +using JS::UTF8CharsZ; +using JS::WTF8Chars; + +using namespace js; +using namespace js::unicode; + +Latin1CharsZ JS::LossyTwoByteCharsToNewLatin1CharsZ( + JSContext* cx, const mozilla::Range tbchars) { + MOZ_ASSERT(cx); + size_t len = tbchars.length(); + unsigned char* latin1 = cx->pod_malloc(len + 1); + if (!latin1) { + return Latin1CharsZ(); + } + LossyConvertUtf16toLatin1(tbchars, AsWritableChars(Span(latin1, len))); + latin1[len] = '\0'; + return Latin1CharsZ(latin1, len); +} + +template +static size_t GetDeflatedUTF8StringLength(const CharT* chars, size_t nchars) { + size_t nbytes = nchars; + for (const CharT* end = chars + nchars; chars < end; chars++) { + char16_t c = *chars; + if (c < 0x80) { + continue; + } + uint32_t v; + if (IsSurrogate(c)) { + /* nbytes sets 1 length since this is surrogate pair. 
*/ + if (IsTrailSurrogate(c) || (chars + 1) == end) { + nbytes += 2; /* Bad Surrogate */ + continue; + } + char16_t c2 = chars[1]; + if (!IsTrailSurrogate(c2)) { + nbytes += 2; /* Bad Surrogate */ + continue; + } + v = UTF16Decode(c, c2); + nbytes--; + chars++; + } else { + v = c; + } + v >>= 11; + nbytes++; + while (v) { + v >>= 5; + nbytes++; + } + } + return nbytes; +} + +JS_PUBLIC_API size_t JS::GetDeflatedUTF8StringLength(JSLinearString* s) { + JS::AutoCheckCannotGC nogc; + return s->hasLatin1Chars() + ? ::GetDeflatedUTF8StringLength(s->latin1Chars(nogc), s->length()) + : ::GetDeflatedUTF8StringLength(s->twoByteChars(nogc), + s->length()); +} + +JS_PUBLIC_API size_t JS::DeflateStringToUTF8Buffer(JSLinearString* src, + mozilla::Span dst) { + JS::AutoCheckCannotGC nogc; + if (src->hasLatin1Chars()) { + auto source = AsChars(Span(src->latin1Chars(nogc), src->length())); + size_t read; + size_t written; + Tie(read, written) = ConvertLatin1toUtf8Partial(source, dst); + Unused << read; + return written; + } + auto source = Span(src->twoByteChars(nogc), src->length()); + size_t read; + size_t written; + Tie(read, written) = ConvertUtf16toUtf8Partial(source, dst); + Unused << read; + return written; +} + +template +void ConvertToUTF8(mozilla::Span src, mozilla::Span dst); + +template <> +void ConvertToUTF8(mozilla::Span src, + mozilla::Span dst) { + Unused << ConvertUtf16toUtf8Partial(src, dst); +} + +template <> +void ConvertToUTF8(mozilla::Span src, + mozilla::Span dst) { + Unused << ConvertLatin1toUtf8Partial(AsChars(src), dst); +} + +template +UTF8CharsZ JS::CharsToNewUTF8CharsZ(JSContext* maybeCx, + const mozilla::Range chars) { + /* Get required buffer size. */ + const CharT* str = chars.begin().get(); + size_t len = ::GetDeflatedUTF8StringLength(str, chars.length()); + + /* Allocate buffer. 
*/ + char* utf8; + if (maybeCx) { + utf8 = maybeCx->pod_malloc(len + 1); + } else { + utf8 = js_pod_malloc(len + 1); + } + if (!utf8) { + return UTF8CharsZ(); + } + + /* Encode to UTF8. */ + ::ConvertToUTF8(Span(str, chars.length()), Span(utf8, len)); + utf8[len] = '\0'; + + return UTF8CharsZ(utf8, len); +} + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* maybeCx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* maybeCx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* maybeCx, const mozilla::Range chars); + +template UTF8CharsZ JS::CharsToNewUTF8CharsZ( + JSContext* maybeCx, const mozilla::Range chars); + +static const uint32_t INVALID_UTF8 = UINT32_MAX; + +/* + * Convert a UTF-8 or WTF-8 (depending on InputCharsT, which is either + * UTF8Chars or WTF8Chars) character sequence into a UCS-4 character and return + * that character. It is assumed that the caller already checked that the + * sequence is valid. + */ +template +static uint32_t Utf8ToOneUcs4CharImpl(const uint8_t* utf8Buffer, + int utf8Length) { + static_assert(std::is_same_v || + std::is_same_v, + "must be either UTF-8 or WTF-8"); + MOZ_ASSERT(1 <= utf8Length && utf8Length <= 4); + + if (utf8Length == 1) { + MOZ_ASSERT(!(*utf8Buffer & 0x80)); + return *utf8Buffer; + } + + /* from Unicode 3.1, non-shortest form is illegal */ + static const uint32_t minucs4Table[] = {0x80, 0x800, NonBMPMin}; + + MOZ_ASSERT((*utf8Buffer & (0x100 - (1 << (7 - utf8Length)))) == + (0x100 - (1 << (8 - utf8Length)))); + uint32_t ucs4Char = *utf8Buffer++ & ((1 << (7 - utf8Length)) - 1); + uint32_t minucs4Char = minucs4Table[utf8Length - 2]; + while (--utf8Length) { + MOZ_ASSERT((*utf8Buffer & 0xC0) == 0x80); + ucs4Char = (ucs4Char << 6) | (*utf8Buffer++ & 0x3F); + } + + if (MOZ_UNLIKELY(ucs4Char < minucs4Char)) { + return INVALID_UTF8; + } + + // WTF-8 allows lone surrogate. 
+ if (std::is_same_v && + MOZ_UNLIKELY(IsSurrogate(ucs4Char))) { + return INVALID_UTF8; + } + + return ucs4Char; +} + +uint32_t JS::Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length) { + return Utf8ToOneUcs4CharImpl(utf8Buffer, utf8Length); +} + +static void ReportInvalidCharacter(JSContext* cx, uint32_t offset) { + char buffer[10]; + SprintfLiteral(buffer, "%u", offset); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_MALFORMED_UTF8_CHAR, buffer); +} + +static void ReportBufferTooSmall(JSContext* cx, uint32_t dummy) { + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_BUFFER_TOO_SMALL); +} + +static void ReportTooBigCharacter(JSContext* cx, uint32_t v) { + char buffer[10]; + SprintfLiteral(buffer, "0x%x", v); + JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, + JSMSG_UTF8_CHAR_TOO_LARGE, buffer); +} + +enum class LoopDisposition { + Break, + Continue, +}; + +enum class OnUTF8Error { + InsertReplacementCharacter, + InsertQuestionMark, + Throw, + Crash, +}; + +// Scan UTF-8 or WTF-8 input and (internally, at least) convert it to a series +// of UTF-16 code units. But you can also do odd things like pass an empty +// lambda for `dst`, in which case the output is discarded entirely--the only +// effect of calling the template that way is error-checking. +template +static bool InflateUTF8ToUTF16(JSContext* cx, const InputCharsT src, + OutputFn dst) { + size_t srclen = src.length(); + for (uint32_t i = 0; i < srclen; i++) { + uint32_t v = uint32_t(src[i]); + if (!(v & 0x80)) { + // ASCII code unit. Simple copy. + if (dst(uint16_t(v)) == LoopDisposition::Break) { + break; + } + } else { + // Non-ASCII code unit. Determine its length in bytes (n). 
+ uint32_t n = 1; + while (v & (0x80 >> n)) { + n++; + } + +#define INVALID(report, arg, n2) \ + do { \ + if (ErrorAction == OnUTF8Error::Throw) { \ + report(cx, arg); \ + return false; \ + } else if (ErrorAction == OnUTF8Error::Crash) { \ + MOZ_CRASH("invalid UTF-8 string: " #report); \ + } else { \ + char16_t replacement; \ + if (ErrorAction == OnUTF8Error::InsertReplacementCharacter) { \ + replacement = REPLACEMENT_CHARACTER; \ + } else { \ + MOZ_ASSERT(ErrorAction == OnUTF8Error::InsertQuestionMark); \ + replacement = '?'; \ + } \ + if (dst(replacement) == LoopDisposition::Break) { \ + break; \ + } \ + n = n2; \ + goto invalidMultiByteCodeUnit; \ + } \ + } while (0) + + // Check the leading byte. + if (n < 2 || n > 4) { + INVALID(ReportInvalidCharacter, i, 1); + } + + // Check that |src| is large enough to hold an n-byte code unit. + if (i + n > srclen) { + INVALID(ReportBufferTooSmall, /* dummy = */ 0, 1); + } + + // Check the second byte. From Unicode Standard v6.2, Table 3-7 + // Well-Formed UTF-8 Byte Sequences. + if ((v == 0xE0 && ((uint8_t)src[i + 1] & 0xE0) != 0xA0) || // E0 A0~BF + (v == 0xED && ((uint8_t)src[i + 1] & 0xE0) != 0x80) || // ED 80~9F + (v == 0xF0 && ((uint8_t)src[i + 1] & 0xF0) == 0x80) || // F0 90~BF + (v == 0xF4 && ((uint8_t)src[i + 1] & 0xF0) != 0x80)) // F4 80~8F + { + if constexpr (std::is_same_v) { + INVALID(ReportInvalidCharacter, i, 1); + } else { + // WTF-8 allows lone surrogate as ED A0~BF 80~BF. + static_assert(std::is_same_v); + if (v == 0xED && ((uint8_t)src[i + 1] & 0xE0) != 0xA0) { // ED A0~BF + INVALID(ReportInvalidCharacter, i, 1); + } + } + } + + // Check the continuation bytes. + for (uint32_t m = 1; m < n; m++) { + if ((src[i + m] & 0xC0) != 0x80) { + INVALID(ReportInvalidCharacter, i, m); + } + } + + // Determine the code unit's length in CharT and act accordingly. + v = Utf8ToOneUcs4CharImpl((uint8_t*)&src[i], n); + if (v < NonBMPMin) { + // The n-byte UTF8 code unit will fit in a single CharT. 
+ if (dst(char16_t(v)) == LoopDisposition::Break) { + break; + } + } else if (v <= NonBMPMax) { + // The n-byte UTF8 code unit will fit in two CharT units. + if (dst(LeadSurrogate(v)) == LoopDisposition::Break) { + break; + } + if (dst(TrailSurrogate(v)) == LoopDisposition::Break) { + break; + } + } else { + // The n-byte UTF8 code unit won't fit in two CharT units. + INVALID(ReportTooBigCharacter, v, 1); + } + + invalidMultiByteCodeUnit: + // Move i to the last byte of the multi-byte code unit; the loop + // header will do the final i++ to move to the start of the next + // code unit. + i += n - 1; + } + } + + return true; +} + +template +static void CopyAndInflateUTF8IntoBuffer(JSContext* cx, const InputCharsT src, + CharT* dst, size_t outlen, + bool allASCII) { + if (allASCII) { + size_t srclen = src.length(); + MOZ_ASSERT(outlen == srclen); + for (uint32_t i = 0; i < srclen; i++) { + dst[i] = CharT(src[i]); + } + } else { + size_t j = 0; + auto push = [dst, &j](char16_t c) -> LoopDisposition { + dst[j++] = CharT(c); + return LoopDisposition::Continue; + }; + MOZ_ALWAYS_TRUE((InflateUTF8ToUTF16(cx, src, push))); + MOZ_ASSERT(j == outlen); + } + dst[outlen] = CharT('\0'); // NUL char +} + +template +static CharsT InflateUTF8StringHelper(JSContext* cx, const InputCharsT src, + size_t* outlen, arena_id_t destArenaId) { + using CharT = typename CharsT::CharT; + static_assert( + std::is_same_v || std::is_same_v, + "bad CharT"); + + *outlen = 0; + + size_t len = 0; + bool allASCII = true; + auto count = [&len, &allASCII](char16_t c) -> LoopDisposition { + len++; + allASCII &= (c < 0x80); + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(cx, src, count)) { + return CharsT(); + } + *outlen = len; + + CharT* dst = cx->pod_arena_malloc(destArenaId, + *outlen + 1); // +1 for NUL + + if (!dst) { + ReportOutOfMemory(cx); + return CharsT(); + } + + constexpr OnUTF8Error errorMode = + std::is_same_v + ? 
OnUTF8Error::InsertQuestionMark + : OnUTF8Error::InsertReplacementCharacter; + CopyAndInflateUTF8IntoBuffer(cx, src, dst, *outlen, allASCII); + + return CharsT(dst, *outlen); +} + +TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +TwoByteCharsZ JS::WTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const WTF8Chars wtf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, wtf8, outlen, destArenaId); +} + +TwoByteCharsZ JS::UTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const ConstUTF8CharsZ& utf8, + size_t* outlen, + arena_id_t destArenaId) { + UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str())); + return InflateUTF8StringHelper( + cx, chars, outlen, destArenaId); +} + +TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ(JSContext* cx, + const JS::UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper(cx, utf8, outlen, destArenaId); +} + +TwoByteCharsZ JS::LossyUTF8CharsToNewTwoByteCharsZ( + JSContext* cx, const JS::ConstUTF8CharsZ& utf8, size_t* outlen, + arena_id_t destArenaId) { + UTF8Chars chars(utf8.c_str(), strlen(utf8.c_str())); + return InflateUTF8StringHelper(cx, chars, outlen, destArenaId); +} + +static void UpdateSmallestEncodingForChar(char16_t c, + JS::SmallestEncoding* encoding) { + JS::SmallestEncoding newEncoding = JS::SmallestEncoding::ASCII; + if (c >= 0x80) { + if (c < 0x100) { + newEncoding = JS::SmallestEncoding::Latin1; + } else { + newEncoding = JS::SmallestEncoding::UTF16; + } + } + if (newEncoding > *encoding) { + *encoding = newEncoding; + } +} + +JS::SmallestEncoding JS::FindSmallestEncoding(UTF8Chars utf8) { + Span unsignedSpan = utf8; + auto charSpan = AsChars(unsignedSpan); + size_t upTo = AsciiValidUpTo(charSpan); + if (upTo == charSpan.Length()) { + return SmallestEncoding::ASCII; + } + if 
(IsUtf8Latin1(charSpan.From(upTo))) { + return SmallestEncoding::Latin1; + } + return SmallestEncoding::UTF16; +} + +Latin1CharsZ JS::UTF8CharsToNewLatin1CharsZ(JSContext* cx, const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +Latin1CharsZ JS::LossyUTF8CharsToNewLatin1CharsZ(JSContext* cx, + const UTF8Chars utf8, + size_t* outlen, + arena_id_t destArenaId) { + return InflateUTF8StringHelper( + cx, utf8, outlen, destArenaId); +} + +/** + * Atomization Helpers. + * + * These functions are extremely single-use, and are not intended for general + * consumption. + */ + +template +bool GetUTF8AtomizationData(JSContext* cx, const InputCharsT utf8, + size_t* outlen, JS::SmallestEncoding* encoding, + HashNumber* hashNum) { + *outlen = 0; + *encoding = JS::SmallestEncoding::ASCII; + *hashNum = 0; + + auto getMetadata = [outlen, encoding, + hashNum](char16_t c) -> LoopDisposition { + (*outlen)++; + UpdateSmallestEncodingForChar(c, encoding); + *hashNum = mozilla::AddToHash(*hashNum, c); + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(cx, utf8, getMetadata)) { + return false; + } + + return true; +} + +template bool GetUTF8AtomizationData( + JSContext* cx, const JS::UTF8Chars utf8, size_t* outlen, + JS::SmallestEncoding* encoding, HashNumber* hashNum); +template bool GetUTF8AtomizationData( + JSContext* cx, const JS::WTF8Chars utf8, size_t* outlen, + JS::SmallestEncoding* encoding, HashNumber* hashNum); + +template +bool UTF8OrWTF8EqualsChars(const CharsT utfChars, const CharT* chars) { + size_t ind = 0; + bool isEqual = true; + + auto checkEqual = [&isEqual, &ind, chars](char16_t c) -> LoopDisposition { +#ifdef DEBUG + JS::SmallestEncoding encoding = JS::SmallestEncoding::ASCII; + UpdateSmallestEncodingForChar(c, &encoding); + if (std::is_same_v) { + MOZ_ASSERT(encoding <= JS::SmallestEncoding::Latin1); + } else if (!std::is_same_v) { + MOZ_CRASH("Invalid character 
type in UTF8EqualsChars"); + } +#endif + + if (CharT(c) != chars[ind]) { + isEqual = false; + return LoopDisposition::Break; + } + + ind++; + return LoopDisposition::Continue; + }; + + // To get here, you must have checked your work. + InflateUTF8ToUTF16(/* cx = */ nullptr, utfChars, + checkEqual); + + return isEqual; +} + +template bool UTF8OrWTF8EqualsChars(const JS::UTF8Chars, + const char16_t*); +template bool UTF8OrWTF8EqualsChars(const JS::UTF8Chars, + const JS::Latin1Char*); +template bool UTF8OrWTF8EqualsChars(const JS::WTF8Chars, + const char16_t*); +template bool UTF8OrWTF8EqualsChars(const JS::WTF8Chars, + const JS::Latin1Char*); + +template +void InflateUTF8CharsToBufferAndTerminate(const InputCharsT src, CharT* dst, + size_t dstLen, + JS::SmallestEncoding encoding) { + CopyAndInflateUTF8IntoBuffer( + /* cx = */ nullptr, src, dst, dstLen, + encoding == JS::SmallestEncoding::ASCII); +} + +template void InflateUTF8CharsToBufferAndTerminate( + const UTF8Chars src, char16_t* dst, size_t dstLen, + JS::SmallestEncoding encoding); +template void InflateUTF8CharsToBufferAndTerminate( + const UTF8Chars src, JS::Latin1Char* dst, size_t dstLen, + JS::SmallestEncoding encoding); +template void InflateUTF8CharsToBufferAndTerminate( + const WTF8Chars src, char16_t* dst, size_t dstLen, + JS::SmallestEncoding encoding); +template void InflateUTF8CharsToBufferAndTerminate( + const WTF8Chars src, JS::Latin1Char* dst, size_t dstLen, + JS::SmallestEncoding encoding); + +#ifdef DEBUG +void JS::ConstUTF8CharsZ::validate(size_t aLength) { + MOZ_ASSERT(data_); + UTF8Chars chars(data_, aLength); + auto nop = [](char16_t) -> LoopDisposition { + return LoopDisposition::Continue; + }; + InflateUTF8ToUTF16(/* cx = */ nullptr, chars, nop); +} +#endif + +bool JS::StringIsASCII(const char* s) { + while (*s) { + if (*s & 0x80) { + return false; + } + s++; + } + return true; +} + +bool JS::StringIsASCII(Span s) { return IsAscii(s); } + +bool StringBuffer::append(const Utf8Unit* units, 
size_t len) { + if (isLatin1()) { + Latin1CharBuffer& latin1 = latin1Chars(); + + while (len > 0) { + if (!IsAscii(*units)) { + break; + } + + if (!latin1.append(units->toUnsignedChar())) { + return false; + } + + ++units; + --len; + } + if (len == 0) { + return true; + } + + // Non-ASCII doesn't *necessarily* mean we couldn't keep appending to + // |latin1|, but it's only possible for [U+0080, U+0100) code points, + // and handling the full complexity of UTF-8 only for that very small + // additional range isn't worth it. Inflate to two-byte storage before + // appending the remaining code points. + if (!inflateChars()) { + return false; + } + } + + UTF8Chars remainingUtf8(units, len); + + // Determine how many UTF-16 code units are required to represent the + // remaining units. + size_t utf16Len = 0; + auto countInflated = [&utf16Len](char16_t c) -> LoopDisposition { + utf16Len++; + return LoopDisposition::Continue; + }; + if (!InflateUTF8ToUTF16(cx_, remainingUtf8, + countInflated)) { + return false; + } + + TwoByteCharBuffer& buf = twoByteChars(); + + size_t i = buf.length(); + if (!buf.growByUninitialized(utf16Len)) { + return false; + } + MOZ_ASSERT(i + utf16Len == buf.length(), + "growByUninitialized assumed to increase length immediately"); + + char16_t* toFill = &buf[i]; + auto appendUtf16 = [&toFill](char16_t unit) { + *toFill++ = unit; + return LoopDisposition::Continue; + }; + + MOZ_ALWAYS_TRUE( + InflateUTF8ToUTF16(cx_, remainingUtf8, appendUtf16)); + MOZ_ASSERT(toFill == buf.end()); + return true; +} diff --git a/js/src/vm/CheckIsObjectKind.h b/js/src/vm/CheckIsObjectKind.h new file mode 100644 index 0000000000..321870d6ed --- /dev/null +++ b/js/src/vm/CheckIsObjectKind.h @@ -0,0 +1,24 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. 
If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#ifndef vm_CheckIsObjectKind_h +#define vm_CheckIsObjectKind_h + +#include // uint8_t + +namespace js { + +enum class CheckIsObjectKind : uint8_t { + IteratorNext, + IteratorReturn, + IteratorThrow, + GetIterator, + GetAsyncIterator +}; + +} // namespace js + +#endif /* vm_CheckIsObjectKind_h */ diff --git a/js/src/vm/CodeCoverage.cpp b/js/src/vm/CodeCoverage.cpp new file mode 100644 index 0000000000..f96cfdff68 --- /dev/null +++ b/js/src/vm/CodeCoverage.cpp @@ -0,0 +1,685 @@ +/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- + * vim: set ts=8 sts=2 et sw=2 tw=80: + * This Source Code Form is subject to the terms of the Mozilla Public + * License, v. 2.0. If a copy of the MPL was not distributed with this + * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ + +#include "vm/CodeCoverage.h" + +#include "mozilla/Atomics.h" +#include "mozilla/IntegerPrintfMacros.h" + +#include +#include +#ifdef XP_WIN +# include +# define getpid _getpid +#else +# include +#endif + +#include "frontend/SourceNotes.h" // SrcNote, SrcNoteType, SrcNoteIterator +#include "gc/Zone.h" +#include "util/Text.h" +#include "vm/BytecodeUtil.h" +#include "vm/JSScript.h" +#include "vm/Realm.h" +#include "vm/Runtime.h" +#include "vm/Time.h" + +// This file contains a few functions which are used to produce files understood +// by lcov tools. A detailed description of the format is available in the man +// page for "geninfo" [1]. To make it short, the following paraphrases what is +// commented in the man page by using curly braces prefixed by for-each to +// express repeated patterns. +// +// TN: +// for-each { +// SN: +// for-each