author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-28 14:29:10 +0000 |
commit | 2aa4a82499d4becd2284cdb482213d541b8804dd (patch) | |
tree | b80bf8bf13c3766139fbacc530efd0dd9d54394c /js/src/wasm/WasmGC.cpp | |
parent | Initial commit. (diff) | |
Adding upstream version 86.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r-- | js/src/wasm/WasmGC.cpp | 261 |
1 file changed, 261 insertions, 0 deletions
diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp
new file mode 100644
index 0000000000..48bfd256dd
--- /dev/null
+++ b/js/src/wasm/WasmGC.cpp
@@ -0,0 +1,261 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
+ * vim: set ts=8 sts=2 et sw=2 tw=80:
+ *
+ * Copyright 2019 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGC.h"
+#include "wasm/WasmInstance.h"
+#include "jit/MacroAssembler-inl.h"
+
+namespace js {
+namespace wasm {
+
+wasm::StackMap* ConvertStackMapBoolVectorToStackMap(
+    const StackMapBoolVector& vec, bool hasRefs) {
+  wasm::StackMap* stackMap = wasm::StackMap::create(vec.length());
+  if (!stackMap) {
+    return nullptr;
+  }
+
+  bool hasRefsObserved = false;
+  size_t i = 0;
+  for (bool b : vec) {
+    if (b) {
+      stackMap->setBit(i);
+      hasRefsObserved = true;
+    }
+    i++;
+  }
+  MOZ_RELEASE_ASSERT(hasRefs == hasRefsObserved);
+
+  return stackMap;
+}
+
+// Generate a stackmap for a function's stack-overflow-at-entry trap, with
+// the structure:
+//
+//    <reg dump area>
+//    |    ++ <space reserved before trap, if any>
+//    |            ++ <space for Frame>
+//    |                     ++ <inbound arg area>
+//    |                              |
+//    Lowest Addr                    Highest Addr
+//
+// The caller owns the resulting stackmap.  This assumes a grow-down stack.
+//
+// For non-debug builds, if the stackmap would contain no pointers, no
+// stackmap is created, and nullptr is returned.  For a debug build, a
+// stackmap is always created and returned.
+//
+// The "space reserved before trap" is the space reserved by
+// MacroAssembler::wasmReserveStackChecked, in the case where the frame is
+// "small", as determined by that function.
+bool CreateStackMapForFunctionEntryTrap(const wasm::ArgTypeVector& argTypes,
+                                        const MachineState& trapExitLayout,
+                                        size_t trapExitLayoutWords,
+                                        size_t nBytesReservedBeforeTrap,
+                                        size_t nInboundStackArgBytes,
+                                        wasm::StackMap** result) {
+  // Ensure this is defined on all return paths.
+  *result = nullptr;
+
+  // The size of the wasm::Frame itself.
+  const size_t nFrameBytes = sizeof(wasm::Frame);
+
+  // The size of the register dump (trap) area.
+  const size_t trapExitLayoutBytes = trapExitLayoutWords * sizeof(void*);
+
+  // This is the total number of bytes covered by the map.
+  const DebugOnly<size_t> nTotalBytes = trapExitLayoutBytes +
+                                        nBytesReservedBeforeTrap + nFrameBytes +
+                                        nInboundStackArgBytes;
+
+  // Create the stackmap initially in this vector.  Since most frames will
+  // contain 128 or fewer words, heap allocation is avoided in the majority of
+  // cases.  vec[0] is for the lowest address in the map, vec[N-1] is for the
+  // highest address in the map.
+  StackMapBoolVector vec;
+
+  // Keep track of whether we've actually seen any refs.
+  bool hasRefs = false;
+
+  // REG DUMP AREA
+  wasm::ExitStubMapVector trapExitExtras;
+  if (!GenerateStackmapEntriesForTrapExit(
+          argTypes, trapExitLayout, trapExitLayoutWords, &trapExitExtras)) {
+    return false;
+  }
+  MOZ_ASSERT(trapExitExtras.length() == trapExitLayoutWords);
+
+  if (!vec.appendN(false, trapExitLayoutWords)) {
+    return false;
+  }
+  for (size_t i = 0; i < trapExitLayoutWords; i++) {
+    vec[i] = trapExitExtras[i];
+    hasRefs |= vec[i];
+  }
+
+  // SPACE RESERVED BEFORE TRAP
+  MOZ_ASSERT(nBytesReservedBeforeTrap % sizeof(void*) == 0);
+  if (!vec.appendN(false, nBytesReservedBeforeTrap / sizeof(void*))) {
+    return false;
+  }
+
+  // SPACE FOR FRAME
+  if (!vec.appendN(false, nFrameBytes / sizeof(void*))) {
+    return false;
+  }
+
+  // INBOUND ARG AREA
+  MOZ_ASSERT(nInboundStackArgBytes % sizeof(void*) == 0);
+  const size_t numStackArgWords = nInboundStackArgBytes / sizeof(void*);
+
+  const size_t wordsSoFar = vec.length();
+  if (!vec.appendN(false, numStackArgWords)) {
+    return false;
+  }
+
+  for (WasmABIArgIter i(argTypes); !i.done(); i++) {
+    ABIArg argLoc = *i;
+    if (argLoc.kind() == ABIArg::Stack &&
+        argTypes[i.index()] == MIRType::RefOrNull) {
+      uint32_t offset = argLoc.offsetFromArgBase();
+      MOZ_ASSERT(offset < nInboundStackArgBytes);
+      MOZ_ASSERT(offset % sizeof(void*) == 0);
+      vec[wordsSoFar + offset / sizeof(void*)] = true;
+      hasRefs = true;
+    }
+  }
+
+#ifndef DEBUG
+  // We saw no references, and this is a non-debug build, so don't bother
+  // building the stackmap.
+  if (!hasRefs) {
+    return true;
+  }
+#endif
+
+  // Convert vec into a wasm::StackMap.
+  MOZ_ASSERT(vec.length() * sizeof(void*) == nTotalBytes);
+  wasm::StackMap* stackMap = ConvertStackMapBoolVectorToStackMap(vec, hasRefs);
+  if (!stackMap) {
+    return false;
+  }
+  stackMap->setExitStubWords(trapExitLayoutWords);
+
+  stackMap->setFrameOffsetFromTop(nFrameBytes / sizeof(void*) +
+                                  numStackArgWords);
+#ifdef DEBUG
+  for (uint32_t i = 0; i < nFrameBytes / sizeof(void*); i++) {
+    MOZ_ASSERT(stackMap->getBit(stackMap->numMappedWords -
+                                stackMap->frameOffsetFromTop + i) == 0);
+  }
+#endif
+
+  *result = stackMap;
+  return true;
+}
+
+bool GenerateStackmapEntriesForTrapExit(const ArgTypeVector& args,
+                                        const MachineState& trapExitLayout,
+                                        const size_t trapExitLayoutNumWords,
+                                        ExitStubMapVector* extras) {
+  MOZ_ASSERT(extras->empty());
+
+  // If this doesn't hold, we can't distinguish saved and not-saved
+  // registers in the MachineState.  See MachineState::MachineState().
+  MOZ_ASSERT(trapExitLayoutNumWords < 0x100);
+
+  if (!extras->appendN(false, trapExitLayoutNumWords)) {
+    return false;
+  }
+
+  for (WasmABIArgIter i(args); !i.done(); i++) {
+    if (!i->argInRegister() || i.mirType() != MIRType::RefOrNull) {
+      continue;
+    }
+
+    size_t offsetFromTop =
+        reinterpret_cast<size_t>(trapExitLayout.address(i->gpr()));
+
+    // If this doesn't hold, the associated register wasn't saved by
+    // the trap exit stub.  Better to crash now than much later, in
+    // some obscure place, and possibly with security consequences.
+    MOZ_RELEASE_ASSERT(offsetFromTop < trapExitLayoutNumWords);
+
+    // offsetFromTop is an offset in words down from the highest
+    // address in the exit stub save area.  Switch it around to be an
+    // offset up from the bottom of the (integer register) save area.
+    size_t offsetFromBottom = trapExitLayoutNumWords - 1 - offsetFromTop;
+
+    (*extras)[offsetFromBottom] = true;
+  }
+
+  return true;
+}
+
+void EmitWasmPreBarrierGuard(MacroAssembler& masm, Register tls,
+                             Register scratch, Register valueAddr,
+                             Label* skipBarrier) {
+  // If no incremental GC has started, we don't need the barrier.
+  masm.loadPtr(
+      Address(tls, offsetof(TlsData, addressOfNeedsIncrementalBarrier)),
+      scratch);
+  masm.branchTest32(Assembler::Zero, Address(scratch, 0), Imm32(0x1),
+                    skipBarrier);
+
+  // If the previous value is null, we don't need the barrier.
+  masm.loadPtr(Address(valueAddr, 0), scratch);
+  masm.branchTestPtr(Assembler::Zero, scratch, scratch, skipBarrier);
+}
+
+void EmitWasmPreBarrierCall(MacroAssembler& masm, Register tls,
+                            Register scratch, Register valueAddr) {
+  MOZ_ASSERT(valueAddr == PreBarrierReg);
+
+  masm.loadPtr(Address(tls, offsetof(TlsData, instance)), scratch);
+  masm.loadPtr(Address(scratch, Instance::offsetOfPreBarrierCode()), scratch);
+#if defined(DEBUG) && defined(JS_CODEGEN_ARM64)
+  // The prebarrier assumes that x28 == sp.
+  Label ok;
+  masm.Cmp(sp, vixl::Operand(x28));
+  masm.B(&ok, Assembler::Equal);
+  masm.breakpoint();
+  masm.bind(&ok);
+#endif
+  masm.call(scratch);
+}
+
+void EmitWasmPostBarrierGuard(MacroAssembler& masm,
+                              const Maybe<Register>& object,
+                              Register otherScratch, Register setValue,
+                              Label* skipBarrier) {
+  // If the pointer being stored is null, no barrier.
+  masm.branchTestPtr(Assembler::Zero, setValue, setValue, skipBarrier);
+
+  // If there is a containing object and it is in the nursery, no barrier.
+  if (object) {
+    masm.branchPtrInNurseryChunk(Assembler::Equal, *object, otherScratch,
+                                 skipBarrier);
+  }
+
+  // If the pointer being stored is to a tenured object, no barrier.
+  masm.branchPtrInNurseryChunk(Assembler::NotEqual, setValue, otherScratch,
+                               skipBarrier);
+}
+
+}  // namespace wasm
+}  // namespace js
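
The word-level bookkeeping in CreateStackMapForFunctionEntryTrap above may be easier to follow outside the SpiderMonkey types. The following standalone sketch (not part of the patch) walks the same four regions with a plain std::vector<bool>; every concrete size in it (the 8-byte word, the 16-byte Frame, the 32-word register dump, the argument offset) is an illustrative assumption rather than a value taken from the tree.

```cpp
// Standalone illustration only; it mirrors the region bookkeeping in
// CreateStackMapForFunctionEntryTrap with assumed sizes.
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t wordSize = 8;                  // assumed sizeof(void*)
  const size_t trapExitLayoutWords = 32;      // assumed register dump size
  const size_t nBytesReservedBeforeTrap = 0;  // "small frame" reservation
  const size_t nFrameBytes = 16;              // assumed sizeof(wasm::Frame)
  const size_t nInboundStackArgBytes = 24;    // three stack-passed words

  // vec[0] covers the lowest address, vec.back() the highest, exactly as in
  // the patch: reg dump area, then reserved space, then Frame, then args.
  std::vector<bool> vec;
  vec.insert(vec.end(), trapExitLayoutWords, false);
  vec.insert(vec.end(), nBytesReservedBeforeTrap / wordSize, false);
  vec.insert(vec.end(), nFrameBytes / wordSize, false);
  const size_t wordsSoFar = vec.size();
  vec.insert(vec.end(), nInboundStackArgBytes / wordSize, false);

  // Suppose the stack argument at offset 8 from the arg base holds a
  // reference: mark the corresponding word, as the ABI-iteration loop does.
  vec[wordsSoFar + 8 / wordSize] = true;

  // Same coverage check as the MOZ_ASSERT against nTotalBytes in the patch.
  const size_t nTotalBytes = trapExitLayoutWords * wordSize +
                             nBytesReservedBeforeTrap + nFrameBytes +
                             nInboundStackArgBytes;
  assert(vec.size() * wordSize == nTotalBytes);

  // frameOffsetFromTop counts words from the highest covered address down to
  // the start of the Frame: Frame words plus inbound-arg words.
  const size_t frameOffsetFromTop =
      nFrameBytes / wordSize + nInboundStackArgBytes / wordSize;
  printf("mapped words: %zu, frameOffsetFromTop: %zu\n", vec.size(),
         frameOffsetFromTop);
  return 0;
}
```

With these assumed numbers the map covers 37 words, and the last 5 of them (the Frame plus the inbound args) are the ones the debug loop at the end of the real function checks for being clear of Frame bits.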
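
The three Emit* helpers at the end of the patch emit branches that skip a GC barrier whenever it cannot be needed. As a rough reading aid, the sketch below restates those skip conditions as ordinary C++ predicates; the function names and the boolean "in nursery" parameters are invented stand-ins for branchPtrInNurseryChunk and the TlsData flag, so this is an interpretation of the emitted code, not an API from the patch.

```cpp
// Standalone restatement of the barrier guard conditions; illustration only.
#include <cstdio>

// Pre-barrier is needed only while an incremental GC is marking and only if
// the slot currently holds a non-null pointer (mirrors EmitWasmPreBarrierGuard).
bool needsPreBarrier(bool needsIncrementalBarrier, void* prevValue) {
  return needsIncrementalBarrier && prevValue != nullptr;
}

// Post-barrier is needed only when a non-null, nursery-allocated value is
// stored into something that is not itself in the nursery (mirrors
// EmitWasmPostBarrierGuard; pass objectInNursery = false when there is no
// containing object).
bool needsPostBarrier(bool objectInNursery, bool valueInNursery,
                      void* newValue) {
  if (newValue == nullptr) return false;
  if (objectInNursery) return false;
  return valueInNursery;
}

int main() {
  int dummy = 0;
  printf("pre: %d, post: %d\n", needsPreBarrier(true, &dummy),
         needsPostBarrier(false, true, &dummy));
  return 0;
}
```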