path: root/toolkit/xre/dllservices/mozglue/interceptor
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /toolkit/xre/dllservices/mozglue/interceptor
parent    Initial commit. (diff)
download  firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.tar.xz
          firefox-esr-36d22d82aa202bb199967e9512281e9a53db42c9.zip

Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'toolkit/xre/dllservices/mozglue/interceptor')
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/Arm64.cpp            |   89
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/Arm64.h              |  221
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/MMPolicies.h         | 1031
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/PatcherBase.h        |  141
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/PatcherDetour.h      | 1739
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/PatcherNopSpace.h    |  205
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/RangeMap.h           |  142
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/TargetFunction.h     | 1000
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/Trampoline.h         |  773
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/VMSharingPolicies.h  |  285
-rw-r--r--  toolkit/xre/dllservices/mozglue/interceptor/moz.build            |   26
11 files changed, 5652 insertions, 0 deletions
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/Arm64.cpp b/toolkit/xre/dllservices/mozglue/interceptor/Arm64.cpp
new file mode 100644
index 0000000000..81d8e6d09b
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/Arm64.cpp
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#include "Arm64.h"
+
+#include "mozilla/ResultVariant.h"
+
+namespace mozilla {
+namespace interceptor {
+namespace arm64 {
+
+struct PCRelativeLoadTest {
+ // Bitmask to be ANDed with the instruction to isolate the bits that this
+ // instance is interested in
+ uint32_t mTestMask;
+ // The desired bits that we want to see after masking
+ uint32_t mMatchBits;
+  // If we match, mDecodeFn provides the code to decode the instruction.
+ LoadOrBranch (*mDecodeFn)(const uintptr_t aPC, const uint32_t aInst);
+};
+
+static LoadOrBranch ADRPDecode(const uintptr_t aPC, const uint32_t aInst) {
+ // Keep in mind that on Windows aarch64, uint32_t is little-endian
+ const uint32_t kMaskDataProcImmPcRelativeImmLo = 0x60000000;
+ const uint32_t kMaskDataProcImmPcRelativeImmHi = 0x00FFFFE0;
+
+ uintptr_t base = aPC;
+ intptr_t offset = SignExtend<intptr_t>(
+ ((aInst & kMaskDataProcImmPcRelativeImmHi) >> 3) |
+ ((aInst & kMaskDataProcImmPcRelativeImmLo) >> 29),
+ 21);
+
+ base &= ~0xFFFULL;
+ offset <<= 12;
+
+ uint8_t reg = aInst & 0x1F;
+
+ return LoadOrBranch(base + offset, reg);
+}
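+
+// Illustrative worked example (not exercised directly by this code): for
+// aPC == 0x7FF612345678 and aInst == 0x90000010, which encodes ADRP x16, #0,
+// both immediate fields are zero, so the decoded result is
+// {mAbsAddress = 0x7FF612345000, mDestReg = 16}, i.e. the 4 KiB page
+// containing the PC, destined for register x16.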
+
+MFBT_API LoadOrBranch BUncondImmDecode(const uintptr_t aPC,
+ const uint32_t aInst) {
+ int32_t offset = SignExtend<int32_t>(aInst & 0x03FFFFFFU, 26);
+ return LoadOrBranch(aPC + offset);
+}
+
+// Order is important here; more specific encoding tests must be placed before
+// less specific encoding tests.
+static const PCRelativeLoadTest gPCRelTests[] = {
+ {0x9FC00000, 0x10000000, nullptr}, // ADR
+ {0x9FC00000, 0x90000000, &ADRPDecode}, // ADRP
+ {0xFF000000, 0x58000000, nullptr}, // LDR (literal) 64-bit GPR
+ {0x3B000000, 0x18000000, nullptr}, // LDR (literal) (remaining forms)
+ {0x7C000000, 0x14000000, nullptr}, // B (unconditional immediate)
+ {0xFE000000, 0x54000000, nullptr}, // B.Cond
+ {0x7E000000, 0x34000000, nullptr}, // Compare and branch (imm)
+ {0x7E000000, 0x36000000, nullptr}, // Test and branch (imm)
+ {0xFE000000, 0xD6000000, nullptr} // Unconditional branch (reg)
+};
+
+/**
+ * In this function we iterate through each entry in |gPCRelTests|, AND
+ * |aInst| with |test.mTestMask| to isolate the bits that we're interested in,
+ * then compare that result against |test.mMatchBits|. If we have a match,
+ * then that particular entry is applicable to |aInst|. If |test.mDecodeFn| is
+ * present, then we call it to decode the instruction. If it is not present,
+ * then we assume that this particular instruction is unsupported.
+ */
+MFBT_API Result<LoadOrBranch, PCRelCheckError> CheckForPCRel(
+ const uintptr_t aPC, const uint32_t aInst) {
+ for (auto&& test : gPCRelTests) {
+ if ((aInst & test.mTestMask) == test.mMatchBits) {
+ if (!test.mDecodeFn) {
+ return Err(PCRelCheckError::NoDecoderAvailable);
+ }
+
+ return test.mDecodeFn(aPC, aInst);
+ }
+ }
+
+ return Err(PCRelCheckError::InstructionNotPCRel);
+}
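+
+// Illustrative outcomes of the three cases above (hand-checked against
+// |gPCRelTests|):
+//
+//   CheckForPCRel(pc, 0x90000010)  // ADRP x16, #0 -> decoded LoadOrBranch
+//   CheckForPCRel(pc, 0xD65F03C0)  // RET          -> NoDecoderAvailable
+//   CheckForPCRel(pc, 0x91000000)  // ADD x0,x0,#0 -> InstructionNotPCRel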
+
+} // namespace arm64
+} // namespace interceptor
+} // namespace mozilla
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/Arm64.h b/toolkit/xre/dllservices/mozglue/interceptor/Arm64.h
new file mode 100644
index 0000000000..4070fbf99f
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/Arm64.h
@@ -0,0 +1,221 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_Arm64_h
+#define mozilla_interceptor_Arm64_h
+
+#include <type_traits>
+
+#include "mozilla/Assertions.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Result.h"
+#include "mozilla/Saturate.h"
+#include "mozilla/Types.h"
+
+namespace mozilla {
+namespace interceptor {
+namespace arm64 {
+
+// clang-format off
+enum class IntegerConditionCode : uint8_t {
+ // From the ARMv8 Architectural Reference Manual, Section C1.2.4
+ // Description Condition Flags
+ EQ = 0b0000, // == Z == 1
+ NE = 0b0001, // != Z == 0
+ CS = 0b0010, // carry set C == 1
+ HS = 0b0010, // carry set (alias) C == 1
+ CC = 0b0011, // carry clear C == 0
+ LO = 0b0011, // carry clear (alias) C == 0
+ MI = 0b0100, // < 0 N == 1
+ PL = 0b0101, // >= 0 N == 0
+ VS = 0b0110, // overflow V == 1
+ VC = 0b0111, // no overflow V == 0
+ HI = 0b1000, // unsigned > C == 1 && Z == 0
+ LS = 0b1001, // unsigned <= !(C == 1 && Z == 0)
+ GE = 0b1010, // signed >= N == V
+ LT = 0b1011, // signed < N != V
+ GT = 0b1100, // signed > Z == 0 && N == V
+ LE = 0b1101, // signed <= !(Z == 0 && N == V)
+ AL = 0b1110, // unconditional <Any>
+ NV = 0b1111 // unconditional (but AL is the preferred encoding)
+};
+// clang-format on
+
+struct LoadOrBranch {
+ enum class Type {
+ Load,
+ Branch,
+ };
+
+ // Load constructor
+ LoadOrBranch(const uintptr_t aAbsAddress, const uint8_t aDestReg)
+ : mType(Type::Load), mAbsAddress(aAbsAddress), mDestReg(aDestReg) {
+ MOZ_ASSERT(aDestReg < 32);
+ }
+
+ // Unconditional branch constructor
+ explicit LoadOrBranch(const uintptr_t aAbsAddress)
+ : mType(Type::Branch),
+ mAbsAddress(aAbsAddress),
+ mCond(IntegerConditionCode::AL) {}
+
+ // Conditional branch constructor
+ LoadOrBranch(const uintptr_t aAbsAddress, const IntegerConditionCode aCond)
+ : mType(Type::Branch), mAbsAddress(aAbsAddress), mCond(aCond) {}
+
+ Type mType;
+
+ // The absolute address to be loaded into a register, or branched to
+ uintptr_t mAbsAddress;
+
+ union {
+ // The destination register for the load
+ uint8_t mDestReg;
+
+ // The condition code for the branch
+ IntegerConditionCode mCond;
+ };
+};
+
+enum class PCRelCheckError {
+ InstructionNotPCRel,
+ NoDecoderAvailable,
+};
+
+MFBT_API Result<LoadOrBranch, PCRelCheckError> CheckForPCRel(
+ const uintptr_t aPC, const uint32_t aInst);
+
+/**
+ * Casts |aValue| to a |ResultT| via sign extension.
+ *
+ * This function should be used when extracting signed immediate values from
+ * an instruction.
+ *
+ * @param aValue The value to be sign extended. This value should already be
+ * isolated from the remainder of the instruction's bits and
+ * shifted all the way to the right.
+ * @param aNumValidBits The number of bits in |aValue| that contain the
+ * immediate signed value, including the sign bit.
+ */
+template <typename ResultT>
+inline ResultT SignExtend(const uint32_t aValue, const uint8_t aNumValidBits) {
+ static_assert(std::is_integral_v<ResultT> && std::is_signed_v<ResultT>,
+ "ResultT must be a signed integral type");
+ MOZ_ASSERT(aNumValidBits < 32U && aNumValidBits > 1);
+
+ using UnsignedResultT = std::decay_t<std::make_unsigned_t<ResultT>>;
+
+ const uint8_t kResultWidthBits = sizeof(ResultT) * 8;
+
+ // Shift left unsigned
+ const uint8_t shiftAmt = kResultWidthBits - aNumValidBits;
+ UnsignedResultT shiftedLeft = static_cast<UnsignedResultT>(aValue)
+ << shiftAmt;
+
+ // Now shift right signed
+ auto result = static_cast<ResultT>(shiftedLeft);
+ result >>= shiftAmt;
+
+ return result;
+}
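+
+// For instance (illustrative): SignExtend<int32_t>(0x1FFFFF, 21) yields -1
+// because bit 20, the sign bit, is set, whereas SignExtend<int32_t>(0xFFFFF,
+// 21) yields 1048575 because it is clear.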
+
+inline static uint32_t BuildUnconditionalBranchToRegister(const uint32_t aReg) {
+ MOZ_ASSERT(aReg < 32);
+ // BR aReg
+ return 0xD61F0000 | (aReg << 5);
+}
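+
+// For example, BuildUnconditionalBranchToRegister(16) yields 0xD61F0200, the
+// encoding of "BR x16" that the veneers below jump through.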
+
+MFBT_API LoadOrBranch BUncondImmDecode(const uintptr_t aPC,
+ const uint32_t aInst);
+
+/**
+ * If |aTarget| is more than 128MB away from |aPC|, we need to use a veneer.
+ */
+inline static bool IsVeneerRequired(const uintptr_t aPC,
+ const uintptr_t aTarget) {
+ detail::Saturate<intptr_t> saturated(aTarget);
+ saturated -= aPC;
+
+ uintptr_t absDiff = Abs(saturated.value());
+
+ return absDiff >= 0x08000000U;
+}
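+
+// For example (illustrative), with aPC == 0x7FF600000000, a target at
+// 0x7FF607FFFFFC is reachable by a plain B instruction, whereas a target at
+// 0x7FF608000000 or beyond is 128MB or more away and requires a veneer.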
+
+inline static bool IsUnconditionalBranchImm(const uint32_t aInst) {
+ return (aInst & 0xFC000000U) == 0x14000000U;
+}
+
+inline static Maybe<uint32_t> BuildUnconditionalBranchImm(
+ const uintptr_t aPC, const uintptr_t aTarget) {
+ detail::Saturate<intptr_t> saturated(aTarget);
+ saturated -= aPC;
+
+ CheckedInt<int32_t> offset(saturated.value());
+ if (!offset.isValid()) {
+ return Nothing();
+ }
+
+ // offset should be a multiple of 4
+ MOZ_ASSERT(offset.value() % 4 == 0);
+ if (offset.value() % 4) {
+ return Nothing();
+ }
+
+ offset /= 4;
+ if (!offset.isValid()) {
+ return Nothing();
+ }
+
+ uint32_t signbits = static_cast<uint32_t>(offset.value()) & 0xFE000000;
+ // Ensure that offset is small enough to fit into the 26 bit region.
+ // We check that the sign bits are either all ones or all zeros.
+ MOZ_ASSERT(signbits == 0xFE000000 || !signbits);
+ if (signbits && signbits != 0xFE000000) {
+ return Nothing();
+ }
+
+ uint32_t masked = static_cast<uint32_t>(offset.value()) & 0x03FFFFFF;
+
+ // B imm26
+ return Some(0x14000000U | masked);
+}
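+
+// Illustrative encodings produced by this helper: a branch eight bytes
+// forward (word offset 2) yields 0x14000002, and a branch four bytes backward
+// (word offset -1) yields 0x17FFFFFF, i.e. 0x14000000 with the low 26 bits
+// holding the two's-complement word offset.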
+
+/**
+ * Allocate and construct a veneer that provides an absolute 64-bit branch to
+ * the hook function.
+ */
+template <typename TrampPoolT>
+inline static uintptr_t MakeVeneer(TrampPoolT& aTrampPool, void* aPrimaryTramp,
+ const uintptr_t aDestAddress) {
+ auto maybeVeneer = aTrampPool.GetNextTrampoline();
+ if (!maybeVeneer) {
+ return 0;
+ }
+
+ Trampoline<typename TrampPoolT::MMPolicyT> veneer(
+ std::move(maybeVeneer.ref()));
+
+ // Write the same header information that is used for trampolines
+ veneer.WriteEncodedPointer(nullptr);
+ veneer.WriteEncodedPointer(aPrimaryTramp);
+
+ veneer.StartExecutableCode();
+
+ // Register 16 is explicitly intended for veneers in ARM64, so we use that
+ // register without fear of clobbering anything important.
+ veneer.WriteLoadLiteral(aDestAddress, 16);
+ veneer.WriteInstruction(BuildUnconditionalBranchToRegister(16));
+
+ return reinterpret_cast<uintptr_t>(veneer.EndExecutableCode());
+}
+
+} // namespace arm64
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_Arm64_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/MMPolicies.h b/toolkit/xre/dllservices/mozglue/interceptor/MMPolicies.h
new file mode 100644
index 0000000000..0a309a1065
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/MMPolicies.h
@@ -0,0 +1,1031 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_MMPolicies_h
+#define mozilla_interceptor_MMPolicies_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/DynamicallyLinkedFunctionPtr.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Span.h"
+#include "mozilla/TypedEnumBits.h"
+#include "mozilla/Types.h"
+#include "mozilla/WindowsMapRemoteView.h"
+#include "mozilla/WindowsUnwindInfo.h"
+
+#include <windows.h>
+
+#if (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
+PVOID WINAPI VirtualAlloc2(HANDLE Process, PVOID BaseAddress, SIZE_T Size,
+ ULONG AllocationType, ULONG PageProtection,
+ MEM_EXTENDED_PARAMETER* ExtendedParameters,
+ ULONG ParameterCount);
+PVOID WINAPI MapViewOfFile3(HANDLE FileMapping, HANDLE Process,
+ PVOID BaseAddress, ULONG64 Offset, SIZE_T ViewSize,
+ ULONG AllocationType, ULONG PageProtection,
+ MEM_EXTENDED_PARAMETER* ExtendedParameters,
+ ULONG ParameterCount);
+#endif // (NTDDI_VERSION < NTDDI_WIN10_RS4) || defined(__MINGW32__)
+
+// _CRT_RAND_S is not defined everywhere, but we need it.
+#if !defined(_CRT_RAND_S)
+extern "C" errno_t rand_s(unsigned int* randomValue);
+#endif // !defined(_CRT_RAND_S)
+
+// Declaring only the functions we need in NativeNt.h. To include the entire
+// NativeNt.h causes circular dependency.
+namespace mozilla {
+namespace nt {
+SIZE_T WINAPI VirtualQueryEx(HANDLE aProcess, LPCVOID aAddress,
+ PMEMORY_BASIC_INFORMATION aMemInfo,
+ SIZE_T aMemInfoLen);
+
+SIZE_T WINAPI VirtualQuery(LPCVOID aAddress, PMEMORY_BASIC_INFORMATION aMemInfo,
+ SIZE_T aMemInfoLen);
+} // namespace nt
+} // namespace mozilla
+
+namespace mozilla {
+namespace interceptor {
+
+// This class implements memory operations not involving any kernel32's
+// functions, so that derived classes can use them.
+class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcessPrimitive {
+ protected:
+ bool ProtectInternal(decltype(&::VirtualProtect) aVirtualProtect,
+ void* aVAddress, size_t aSize, uint32_t aProtFlags,
+ uint32_t* aPrevProtFlags) const {
+ MOZ_ASSERT(aPrevProtFlags);
+ BOOL ok = aVirtualProtect(aVAddress, aSize, aProtFlags,
+ reinterpret_cast<PDWORD>(aPrevProtFlags));
+ if (!ok && aPrevProtFlags) {
+ // VirtualProtect can fail but still set valid protection flags.
+ // Let's clear those upon failure.
+ *aPrevProtFlags = 0;
+ }
+
+ return !!ok;
+ }
+
+ public:
+ bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
+ ::memcpy(aToPtr, aFromPtr, aLen);
+ return true;
+ }
+
+ bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
+ ::memcpy(aToPtr, aFromPtr, aLen);
+ return true;
+ }
+
+ /**
+ * @return true if the page that hosts aVAddress is accessible.
+ */
+ bool IsPageAccessible(uintptr_t aVAddress) const {
+ MEMORY_BASIC_INFORMATION mbi;
+ SIZE_T result = nt::VirtualQuery(reinterpret_cast<LPCVOID>(aVAddress), &mbi,
+ sizeof(mbi));
+
+ return result && mbi.AllocationProtect && mbi.State == MEM_COMMIT &&
+ mbi.Protect != PAGE_NOACCESS;
+ }
+};
+
+class MOZ_TRIVIAL_CTOR_DTOR MMPolicyBase {
+ protected:
+ static uintptr_t AlignDown(const uintptr_t aUnaligned,
+ const uintptr_t aAlignTo) {
+ MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
+#pragma warning(suppress : 4146)
+ return aUnaligned & (-aAlignTo);
+ }
+
+ static uintptr_t AlignUp(const uintptr_t aUnaligned,
+ const uintptr_t aAlignTo) {
+ MOZ_ASSERT(IsPowerOfTwo(aAlignTo));
+#pragma warning(suppress : 4146)
+ return aUnaligned + ((-aUnaligned) & (aAlignTo - 1));
+ }
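+
+  // Worked example of the two helpers above (illustrative): with 4 KiB
+  // alignment, AlignDown(0x12345, 0x1000) == 0x12000 and
+  // AlignUp(0x12345, 0x1000) == 0x13000, while an already-aligned value such
+  // as 0x12000 is returned unchanged by both.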
+
+ static PVOID AlignUpToRegion(PVOID aUnaligned, uintptr_t aAlignTo,
+ size_t aLen, size_t aDesiredLen) {
+ uintptr_t unaligned = reinterpret_cast<uintptr_t>(aUnaligned);
+ uintptr_t aligned = AlignUp(unaligned, aAlignTo);
+ MOZ_ASSERT(aligned >= unaligned);
+
+ if (aLen < aligned - unaligned) {
+ return nullptr;
+ }
+
+ aLen -= (aligned - unaligned);
+ return reinterpret_cast<PVOID>((aLen >= aDesiredLen) ? aligned : 0);
+ }
+
+ public:
+#if defined(NIGHTLY_BUILD)
+ Maybe<DetourError> mLastError;
+ const Maybe<DetourError>& GetLastDetourError() const { return mLastError; }
+ template <typename... Args>
+ void SetLastDetourError(Args&&... aArgs) {
+ mLastError = Some(DetourError(std::forward<Args>(aArgs)...));
+ }
+#else
+ template <typename... Args>
+ void SetLastDetourError(Args&&... aArgs) {}
+#endif // defined(NIGHTLY_BUILD)
+
+ DWORD ComputeAllocationSize(const uint32_t aRequestedSize) const {
+ MOZ_ASSERT(aRequestedSize);
+ DWORD result = aRequestedSize;
+
+ const uint32_t granularity = GetAllocGranularity();
+
+ uint32_t mod = aRequestedSize % granularity;
+ if (mod) {
+ result += (granularity - mod);
+ }
+
+ return result;
+ }
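+
+  // For example (illustrative), with the usual 64 KiB allocation granularity,
+  // ComputeAllocationSize(0x1234) rounds up to 0x10000.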
+
+ DWORD GetAllocGranularity() const {
+ static const DWORD kAllocGranularity = []() -> DWORD {
+ SYSTEM_INFO sysInfo;
+ ::GetSystemInfo(&sysInfo);
+ return sysInfo.dwAllocationGranularity;
+ }();
+
+ return kAllocGranularity;
+ }
+
+ DWORD GetPageSize() const {
+ static const DWORD kPageSize = []() -> DWORD {
+ SYSTEM_INFO sysInfo;
+ ::GetSystemInfo(&sysInfo);
+ return sysInfo.dwPageSize;
+ }();
+
+ return kPageSize;
+ }
+
+ uintptr_t GetMaxUserModeAddress() const {
+ static const uintptr_t kMaxUserModeAddr = []() -> uintptr_t {
+ SYSTEM_INFO sysInfo;
+ ::GetSystemInfo(&sysInfo);
+ return reinterpret_cast<uintptr_t>(sysInfo.lpMaximumApplicationAddress);
+ }();
+
+ return kMaxUserModeAddr;
+ }
+
+ static const uint8_t* GetLowerBound(const Span<const uint8_t>& aBounds) {
+ return &(*aBounds.cbegin());
+ }
+
+ static const uint8_t* GetUpperBoundIncl(const Span<const uint8_t>& aBounds) {
+ // We return an upper bound that is inclusive.
+ return &(*(aBounds.cend() - 1));
+ }
+
+ static const uint8_t* GetUpperBoundExcl(const Span<const uint8_t>& aBounds) {
+ // We return an upper bound that is exclusive by adding 1 to the inclusive
+ // upper bound.
+ return GetUpperBoundIncl(aBounds) + 1;
+ }
+
+ /**
+ * It is convenient for us to provide address range information based on a
+ * "pivot" and a distance from that pivot, as branch instructions operate
+ * within a range of the program counter. OTOH, to actually manage the
+ * regions of memory, it is easier to think about them in terms of their
+ * lower and upper bounds. This function converts from the former format to
+ * the latter format.
+ */
+ Maybe<Span<const uint8_t>> SpanFromPivotAndDistance(
+ const uint32_t aSize, const uintptr_t aPivotAddr,
+ const uint32_t aMaxDistanceFromPivot) const {
+ if (!aPivotAddr || !aMaxDistanceFromPivot) {
+ return Nothing();
+ }
+
+ // We don't allow regions below 1MB so that we're not allocating near any
+ // sensitive areas in our address space.
+ const uintptr_t kMinAllowableAddress = 0x100000;
+
+ const uintptr_t kGranularity(GetAllocGranularity());
+
+ // We subtract the max distance from the pivot to determine our lower bound.
+ CheckedInt<uintptr_t> lowerBound(aPivotAddr);
+ lowerBound -= aMaxDistanceFromPivot;
+ if (lowerBound.isValid()) {
+ // In this case, the subtraction has not underflowed, but we still want
+ // the lower bound to be at least kMinAllowableAddress.
+ lowerBound = std::max(lowerBound.value(), kMinAllowableAddress);
+ } else {
+ // In this case, we underflowed. Forcibly set the lower bound to
+ // kMinAllowableAddress.
+ lowerBound = CheckedInt<uintptr_t>(kMinAllowableAddress);
+ }
+
+ // Align up to the next unit of allocation granularity when necessary.
+ lowerBound = AlignUp(lowerBound.value(), kGranularity);
+ MOZ_ASSERT(lowerBound.isValid());
+ if (!lowerBound.isValid()) {
+ return Nothing();
+ }
+
+ // We must ensure that our region is below the maximum allowable user-mode
+ // address, or our reservation will fail.
+ const uintptr_t kMaxUserModeAddr = GetMaxUserModeAddress();
+
+ // We add the max distance from the pivot to determine our upper bound.
+ CheckedInt<uintptr_t> upperBound(aPivotAddr);
+ upperBound += aMaxDistanceFromPivot;
+ if (upperBound.isValid()) {
+ // In this case, the addition has not overflowed, but we still want
+ // the upper bound to be at most kMaxUserModeAddr.
+ upperBound = std::min(upperBound.value(), kMaxUserModeAddr);
+ } else {
+ // In this case, we overflowed. Forcibly set the upper bound to
+ // kMaxUserModeAddr.
+ upperBound = CheckedInt<uintptr_t>(kMaxUserModeAddr);
+ }
+
+ // Subtract the desired allocation size so that any chunk allocated in the
+ // region will be reachable.
+ upperBound -= aSize;
+ if (!upperBound.isValid()) {
+ return Nothing();
+ }
+
+ // Align down to the next unit of allocation granularity when necessary.
+ upperBound = AlignDown(upperBound.value(), kGranularity);
+ if (!upperBound.isValid()) {
+ return Nothing();
+ }
+
+ MOZ_ASSERT(lowerBound.value() < upperBound.value());
+ if (lowerBound.value() >= upperBound.value()) {
+ return Nothing();
+ }
+
+ // Return the result as a Span
+ return Some(Span(reinterpret_cast<const uint8_t*>(lowerBound.value()),
+ upperBound.value() - lowerBound.value()));
+ }
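+
+  // A minimal usage sketch (the distance constant here is hypothetical): to
+  // keep a trampoline reservation reachable from a pivot function by a rel32
+  // branch, a caller might compute
+  //
+  //   auto bounds = SpanFromPivotAndDistance(reservationSize, pivotFnAddr,
+  //                                          0x7FFF0000);
+  //
+  // and pass |bounds| down to Reserve() below.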
+
+ /**
+ * This function locates a virtual memory region of |aDesiredBytesLen| that
+ * resides in the interval [aRangeMin, aRangeMax). We do this by scanning the
+ * virtual memory space for a block of unallocated memory that is sufficiently
+ * large.
+ */
+ PVOID FindRegion(HANDLE aProcess, const size_t aDesiredBytesLen,
+ const uint8_t* aRangeMin, const uint8_t* aRangeMax) {
+ // Convert the given pointers to uintptr_t because we should not
+ // compare two pointers unless they are from the same array or object.
+ uintptr_t rangeMin = reinterpret_cast<uintptr_t>(aRangeMin);
+ uintptr_t rangeMax = reinterpret_cast<uintptr_t>(aRangeMax);
+
+ const DWORD kGranularity = GetAllocGranularity();
+ if (!aDesiredBytesLen) {
+ SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_INVALIDLEN);
+ return nullptr;
+ }
+
+ MOZ_ASSERT(rangeMin < rangeMax);
+ if (rangeMin >= rangeMax) {
+ SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_INVALIDRANGE);
+ return nullptr;
+ }
+
+ // Generate a randomized base address that falls within the interval
+ // [aRangeMin, aRangeMax - aDesiredBytesLen]
+ unsigned int rnd = 0;
+ rand_s(&rnd);
+
+ // Reduce rnd to a value that falls within the acceptable range
+ uintptr_t maxOffset =
+ (rangeMax - rangeMin - aDesiredBytesLen) / kGranularity;
+ // Divide by maxOffset + 1 because maxOffset * kGranularity is acceptable.
+ uintptr_t offset = (uintptr_t(rnd) % (maxOffset + 1)) * kGranularity;
+
+ // Start searching at this address
+ const uintptr_t searchStart = rangeMin + offset;
+ // The max address needs to incorporate the desired length
+ const uintptr_t kMaxPtr = rangeMax - aDesiredBytesLen;
+
+ MOZ_DIAGNOSTIC_ASSERT(searchStart <= kMaxPtr);
+
+ MEMORY_BASIC_INFORMATION mbi;
+ SIZE_T len = sizeof(mbi);
+
+ // Scan the range for a free chunk that is at least as large as
+ // aDesiredBytesLen
+ // Scan [searchStart, kMaxPtr]
+ for (uintptr_t address = searchStart; address <= kMaxPtr;) {
+ if (nt::VirtualQueryEx(aProcess, reinterpret_cast<uint8_t*>(address),
+ &mbi, len) != len) {
+ SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_VIRTUALQUERY_ERROR,
+ ::GetLastError());
+ return nullptr;
+ }
+
+ if (mbi.State == MEM_FREE) {
+ // |mbi.BaseAddress| is aligned with the page granularity, but may not
+ // be aligned with the allocation granularity. VirtualAlloc does not
+ // accept such a non-aligned address unless the corresponding allocation
+ // region is free. So we get the next boundary's start address.
+ PVOID regionStart = AlignUpToRegion(mbi.BaseAddress, kGranularity,
+ mbi.RegionSize, aDesiredBytesLen);
+ if (regionStart) {
+ return regionStart;
+ }
+ }
+
+ address = reinterpret_cast<uintptr_t>(mbi.BaseAddress) + mbi.RegionSize;
+ }
+
+ // Scan [aRangeMin, searchStart)
+ for (uintptr_t address = rangeMin; address < searchStart;) {
+ if (nt::VirtualQueryEx(aProcess, reinterpret_cast<uint8_t*>(address),
+ &mbi, len) != len) {
+ SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_VIRTUALQUERY_ERROR,
+ ::GetLastError());
+ return nullptr;
+ }
+
+ if (mbi.State == MEM_FREE) {
+ PVOID regionStart = AlignUpToRegion(mbi.BaseAddress, kGranularity,
+ mbi.RegionSize, aDesiredBytesLen);
+ if (regionStart) {
+ return regionStart;
+ }
+ }
+
+ address = reinterpret_cast<uintptr_t>(mbi.BaseAddress) + mbi.RegionSize;
+ }
+
+ SetLastDetourError(MMPOLICY_RESERVE_FINDREGION_NO_FREE_REGION,
+ ::GetLastError());
+ return nullptr;
+ }
+
+ /**
+ * This function reserves a |aSize| block of virtual memory.
+ *
+ * When |aBounds| is Nothing, it just calls |aReserveFn| and lets Windows
+ * choose the base address.
+ *
+ * Otherwise, it tries to call |aReserveRangeFn| to reserve the memory within
+ * the bounds provided by |aBounds|. It is advantageous to use this function
+ * because the OS's VM manager has better information as to which base
+ * addresses are the best to use.
+ *
+   * If |aReserveRangeFn| returns Nothing, this means that the platform support
+ * is not available. In that case, we fall back to manually computing a region
+ * to use for reserving the memory by calling |FindRegion|.
+ */
+ template <typename ReserveFnT, typename ReserveRangeFnT>
+ PVOID Reserve(HANDLE aProcess, const uint32_t aSize,
+ const ReserveFnT& aReserveFn,
+ const ReserveRangeFnT& aReserveRangeFn,
+ const Maybe<Span<const uint8_t>>& aBounds) {
+ if (!aBounds) {
+ // No restrictions, let the OS choose the base address
+ PVOID ret = aReserveFn(aProcess, nullptr, aSize);
+ if (!ret) {
+ SetLastDetourError(MMPOLICY_RESERVE_NOBOUND_RESERVE_ERROR,
+ ::GetLastError());
+ }
+ return ret;
+ }
+
+ const uint8_t* lowerBound = GetLowerBound(aBounds.ref());
+ const uint8_t* upperBoundExcl = GetUpperBoundExcl(aBounds.ref());
+
+ Maybe<PVOID> result =
+ aReserveRangeFn(aProcess, aSize, lowerBound, upperBoundExcl);
+ if (result) {
+ return result.value();
+ }
+
+ // aReserveRangeFn is not available on this machine. We'll do a manual
+ // search.
+
+ size_t curAttempt = 0;
+ const size_t kMaxAttempts = 8;
+
+ // We loop here because |FindRegion| may return a base address that
+ // is reserved elsewhere before we have had a chance to reserve it
+ // ourselves.
+ while (curAttempt < kMaxAttempts) {
+ PVOID base = FindRegion(aProcess, aSize, lowerBound, upperBoundExcl);
+ if (!base) {
+ return nullptr;
+ }
+
+ result = Some(aReserveFn(aProcess, base, aSize));
+ if (result.value()) {
+ return result.value();
+ }
+
+ ++curAttempt;
+ }
+
+ // If we run out of attempts, we fall through to the default case where
+ // the system chooses any base address it wants. In that case, the hook
+ // will be set on a best-effort basis.
+ PVOID ret = aReserveFn(aProcess, nullptr, aSize);
+ if (!ret) {
+ SetLastDetourError(MMPOLICY_RESERVE_FINAL_RESERVE_ERROR,
+ ::GetLastError());
+ }
+ return ret;
+ }
+};
+
+class MOZ_TRIVIAL_CTOR_DTOR MMPolicyInProcess
+ : public MMPolicyInProcessPrimitive,
+ public MMPolicyBase {
+ public:
+ typedef MMPolicyInProcess MMPolicyT;
+
+ constexpr MMPolicyInProcess()
+ : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {}
+
+ MMPolicyInProcess(const MMPolicyInProcess&) = delete;
+ MMPolicyInProcess& operator=(const MMPolicyInProcess&) = delete;
+
+ MMPolicyInProcess(MMPolicyInProcess&& aOther)
+ : mBase(nullptr), mReservationSize(0), mCommitOffset(0) {
+ *this = std::move(aOther);
+ }
+
+ MMPolicyInProcess& operator=(MMPolicyInProcess&& aOther) {
+ mBase = aOther.mBase;
+ aOther.mBase = nullptr;
+
+ mCommitOffset = aOther.mCommitOffset;
+ aOther.mCommitOffset = 0;
+
+ mReservationSize = aOther.mReservationSize;
+ aOther.mReservationSize = 0;
+
+ return *this;
+ }
+
+ explicit operator bool() const { return !!mBase; }
+
+ /**
+ * Should we unhook everything upon destruction?
+ */
+ bool ShouldUnhookUponDestruction() const { return true; }
+
+#if defined(_M_IX86)
+ bool WriteAtomic(void* aDestPtr, const uint16_t aValue) const {
+ *static_cast<uint16_t*>(aDestPtr) = aValue;
+ return true;
+ }
+#endif // defined(_M_IX86)
+
+ bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
+ uint32_t* aPrevProtFlags) const {
+ return ProtectInternal(::VirtualProtect, aVAddress, aSize, aProtFlags,
+ aPrevProtFlags);
+ }
+
+ bool FlushInstructionCache() const {
+ return !!::FlushInstructionCache(::GetCurrentProcess(), nullptr, 0);
+ }
+
+ static DWORD GetTrampWriteProtFlags() { return PAGE_EXECUTE_READWRITE; }
+
+#if defined(_M_X64)
+ bool IsTrampolineSpaceInLowest2GB() const {
+ return (mBase + mReservationSize) <=
+ reinterpret_cast<uint8_t*>(0x0000000080000000ULL);
+ }
+
+ static constexpr bool kSupportsUnwindInfo = true;
+
+ mozilla::UniquePtr<uint8_t[]> LookupUnwindInfo(
+ uintptr_t aOrigFuncAddr, uint32_t* aOffsetFromBeginAddr,
+ uint32_t* aOffsetToEndAddr, uintptr_t* aOrigImageBase) const {
+ DWORD64 origImageBase = 0;
+ auto origFuncEntry =
+ RtlLookupFunctionEntry(aOrigFuncAddr, &origImageBase, nullptr);
+ if (!origFuncEntry) {
+ return nullptr;
+ }
+
+ if (aOffsetFromBeginAddr) {
+ *aOffsetFromBeginAddr =
+ aOrigFuncAddr - (origImageBase + origFuncEntry->BeginAddress);
+ }
+ if (aOffsetToEndAddr) {
+ *aOffsetToEndAddr =
+ (origImageBase + origFuncEntry->EndAddress) - aOrigFuncAddr;
+ }
+ if (aOrigImageBase) {
+ *aOrigImageBase = origImageBase;
+ }
+ return reinterpret_cast<const UnwindInfo*>(origImageBase +
+ origFuncEntry->UnwindData)
+ ->Copy();
+ }
+
+ bool AddFunctionTable(uintptr_t aFunctionTable, uint32_t aEntryCount,
+ uintptr_t aBaseAddress) const {
+ return bool(
+ RtlAddFunctionTable(reinterpret_cast<PRUNTIME_FUNCTION>(aFunctionTable),
+ aEntryCount, aBaseAddress));
+ }
+#endif // defined(_M_X64)
+
+ protected:
+ uint8_t* GetLocalView() const { return mBase; }
+
+ uintptr_t GetRemoteView() const {
+ // Same as local view for in-process
+ return reinterpret_cast<uintptr_t>(mBase);
+ }
+
+ /**
+ * @return the effective number of bytes reserved, or 0 on failure
+ */
+ uint32_t Reserve(const uint32_t aSize,
+ const Maybe<Span<const uint8_t>>& aBounds) {
+ if (!aSize) {
+ return 0;
+ }
+
+ if (mBase) {
+ MOZ_ASSERT(mReservationSize >= aSize);
+ return mReservationSize;
+ }
+
+ mReservationSize = ComputeAllocationSize(aSize);
+
+ auto reserveFn = [](HANDLE aProcess, PVOID aBase, uint32_t aSize) -> PVOID {
+ return ::VirtualAlloc(aBase, aSize, MEM_RESERVE, PAGE_NOACCESS);
+ };
+
+ auto reserveWithinRangeFn =
+ [](HANDLE aProcess, uint32_t aSize, const uint8_t* aRangeMin,
+ const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
+ static const StaticDynamicallyLinkedFunctionPtr<
+ decltype(&::VirtualAlloc2)>
+ pVirtualAlloc2(L"kernelbase.dll", "VirtualAlloc2");
+ if (!pVirtualAlloc2) {
+ return Nothing();
+ }
+
+ // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
+ MEM_ADDRESS_REQUIREMENTS memReq = {
+ const_cast<uint8_t*>(aRangeMin),
+ const_cast<uint8_t*>(aRangeMaxExcl - 1)};
+
+ MEM_EXTENDED_PARAMETER memParam = {};
+ memParam.Type = MemExtendedParameterAddressRequirements;
+ memParam.Pointer = &memReq;
+
+ return Some(pVirtualAlloc2(aProcess, nullptr, aSize, MEM_RESERVE,
+ PAGE_NOACCESS, &memParam, 1));
+ };
+
+ mBase = static_cast<uint8_t*>(
+ MMPolicyBase::Reserve(::GetCurrentProcess(), mReservationSize,
+ reserveFn, reserveWithinRangeFn, aBounds));
+
+ if (!mBase) {
+ return 0;
+ }
+
+ return mReservationSize;
+ }
+
+ bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
+ const uint32_t aRequestedLength) {
+ if (!(*this)) {
+ return false;
+ }
+
+ uint32_t limit = aRequestedOffset + aRequestedLength - 1;
+ if (limit < mCommitOffset) {
+ // No commit required
+ return true;
+ }
+
+ MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
+ if (mCommitOffset >= mReservationSize) {
+ return false;
+ }
+
+ PVOID local = ::VirtualAlloc(mBase + mCommitOffset, GetPageSize(),
+ MEM_COMMIT, PAGE_EXECUTE_READ);
+ if (!local) {
+ return false;
+ }
+
+ mCommitOffset += GetPageSize();
+ return true;
+ }
+
+ private:
+ uint8_t* mBase;
+ uint32_t mReservationSize;
+ uint32_t mCommitOffset;
+};
+
+// This class manages in-process memory access without using functions
+// imported from kernel32.dll. Instead, it uses functions in its own
+// function table that are provided from outside.
+class MMPolicyInProcessEarlyStage : public MMPolicyInProcessPrimitive {
+ public:
+ struct Kernel32Exports {
+ decltype(&::FlushInstructionCache) mFlushInstructionCache;
+ decltype(&::GetModuleHandleW) mGetModuleHandleW;
+ decltype(&::GetSystemInfo) mGetSystemInfo;
+ decltype(&::VirtualProtect) mVirtualProtect;
+ };
+
+ private:
+ static DWORD GetPageSize(const Kernel32Exports& aK32Exports) {
+ SYSTEM_INFO sysInfo;
+ aK32Exports.mGetSystemInfo(&sysInfo);
+ return sysInfo.dwPageSize;
+ }
+
+ const Kernel32Exports& mK32Exports;
+ const DWORD mPageSize;
+
+ public:
+ explicit MMPolicyInProcessEarlyStage(const Kernel32Exports& aK32Exports)
+ : mK32Exports(aK32Exports), mPageSize(GetPageSize(mK32Exports)) {}
+
+  // The pattern of initializing a local static variable with a lambda,
+  // which can be seen in MMPolicyBase, compiles into code that uses
+  // critical-section APIs such as EnterCriticalSection imported from
+  // kernel32.dll. Because this class needs to be able to run in a process's
+  // early stage, when the IAT is not yet resolved, we cannot use that
+  // pattern; instead we simply cache the value in a member of the class.
+ DWORD GetPageSize() const { return mPageSize; }
+
+ bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
+ uint32_t* aPrevProtFlags) const {
+ return ProtectInternal(mK32Exports.mVirtualProtect, aVAddress, aSize,
+ aProtFlags, aPrevProtFlags);
+ }
+
+ bool FlushInstructionCache() const {
+ const HANDLE kCurrentProcess = reinterpret_cast<HANDLE>(-1);
+ return !!mK32Exports.mFlushInstructionCache(kCurrentProcess, nullptr, 0);
+ }
+};
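+
+// A minimal usage sketch of MMPolicyInProcessEarlyStage (illustrative; the
+// function pointers are assumed to have been resolved by hand, e.g. by
+// walking kernel32's export table, since the IAT may not be populated yet):
+//
+//   MMPolicyInProcessEarlyStage::Kernel32Exports k32 = {
+//       pFlushInstructionCache, pGetModuleHandleW, pGetSystemInfo,
+//       pVirtualProtect};
+//   MMPolicyInProcessEarlyStage policy(k32);
+//   uint32_t prevProt;
+//   policy.Protect(targetAddr, policy.GetPageSize(), PAGE_EXECUTE_READWRITE,
+//                  &prevProt);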
+
+class MMPolicyOutOfProcess : public MMPolicyBase {
+ public:
+ typedef MMPolicyOutOfProcess MMPolicyT;
+
+ explicit MMPolicyOutOfProcess(HANDLE aProcess)
+ : mProcess(nullptr),
+ mMapping(nullptr),
+ mLocalView(nullptr),
+ mRemoteView(nullptr),
+ mReservationSize(0),
+ mCommitOffset(0) {
+ MOZ_ASSERT(aProcess);
+ ::DuplicateHandle(::GetCurrentProcess(), aProcess, ::GetCurrentProcess(),
+ &mProcess, kAccessFlags, FALSE, 0);
+ MOZ_ASSERT(mProcess);
+ }
+
+ explicit MMPolicyOutOfProcess(DWORD aPid)
+ : mProcess(::OpenProcess(kAccessFlags, FALSE, aPid)),
+ mMapping(nullptr),
+ mLocalView(nullptr),
+ mRemoteView(nullptr),
+ mReservationSize(0),
+ mCommitOffset(0) {
+ MOZ_ASSERT(mProcess);
+ }
+
+ ~MMPolicyOutOfProcess() { Destroy(); }
+
+ MMPolicyOutOfProcess(MMPolicyOutOfProcess&& aOther)
+ : mProcess(nullptr),
+ mMapping(nullptr),
+ mLocalView(nullptr),
+ mRemoteView(nullptr),
+ mReservationSize(0),
+ mCommitOffset(0) {
+ *this = std::move(aOther);
+ }
+
+ MMPolicyOutOfProcess(const MMPolicyOutOfProcess& aOther) = delete;
+ MMPolicyOutOfProcess& operator=(const MMPolicyOutOfProcess&) = delete;
+
+ MMPolicyOutOfProcess& operator=(MMPolicyOutOfProcess&& aOther) {
+ Destroy();
+
+ mProcess = aOther.mProcess;
+ aOther.mProcess = nullptr;
+
+ mMapping = aOther.mMapping;
+ aOther.mMapping = nullptr;
+
+ mLocalView = aOther.mLocalView;
+ aOther.mLocalView = nullptr;
+
+ mRemoteView = aOther.mRemoteView;
+ aOther.mRemoteView = nullptr;
+
+ mReservationSize = aOther.mReservationSize;
+ aOther.mReservationSize = 0;
+
+ mCommitOffset = aOther.mCommitOffset;
+ aOther.mCommitOffset = 0;
+
+ return *this;
+ }
+
+ explicit operator bool() const {
+ return mProcess && mMapping && mLocalView && mRemoteView;
+ }
+
+ bool ShouldUnhookUponDestruction() const {
+ // We don't clean up hooks for remote processes; they are expected to
+ // outlive our process.
+ return false;
+ }
+
+  // This function reads exactly |aLen| bytes from the target process and
+  // succeeds only when the entire area to be read is accessible.
+ bool Read(void* aToPtr, const void* aFromPtr, size_t aLen) const {
+ MOZ_ASSERT(mProcess);
+ if (!mProcess) {
+ return false;
+ }
+
+ SIZE_T numBytes = 0;
+ BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr, aLen, &numBytes);
+ return ok && numBytes == aLen;
+ }
+
+  // This function reads as many bytes as possible from the target process up
+  // to |aLen| bytes and returns the number of bytes actually read.
+ size_t TryRead(void* aToPtr, const void* aFromPtr, size_t aLen) const {
+ MOZ_ASSERT(mProcess);
+ if (!mProcess) {
+ return 0;
+ }
+
+ uint32_t pageSize = GetPageSize();
+ uintptr_t pageMask = pageSize - 1;
+
+ auto rangeStart = reinterpret_cast<uintptr_t>(aFromPtr);
+ auto rangeEnd = rangeStart + aLen;
+
+ while (rangeStart < rangeEnd) {
+ SIZE_T numBytes = 0;
+ BOOL ok = ::ReadProcessMemory(mProcess, aFromPtr, aToPtr,
+ rangeEnd - rangeStart, &numBytes);
+ if (ok) {
+ return numBytes;
+ }
+
+ // If ReadProcessMemory fails, try to read up to each page boundary from
+ // the end of the requested area one by one.
+ if (rangeEnd & pageMask) {
+ rangeEnd &= ~pageMask;
+ } else {
+ rangeEnd -= pageSize;
+ }
+ }
+
+ return 0;
+ }
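+
+  // For example (illustrative): with a 0x1000-byte page size, if |aFromPtr|
+  // is 0xFE0 bytes into an accessible page and the following page is
+  // inaccessible, TryRead(to, from, 0x40) fails to read all 0x40 bytes, then
+  // the loop trims the range down to the page boundary and returns 0x20.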
+
+ bool Write(void* aToPtr, const void* aFromPtr, size_t aLen) const {
+ MOZ_ASSERT(mProcess);
+ if (!mProcess) {
+ return false;
+ }
+
+ SIZE_T numBytes = 0;
+ BOOL ok = ::WriteProcessMemory(mProcess, aToPtr, aFromPtr, aLen, &numBytes);
+ return ok && numBytes == aLen;
+ }
+
+ bool Protect(void* aVAddress, size_t aSize, uint32_t aProtFlags,
+ uint32_t* aPrevProtFlags) const {
+ MOZ_ASSERT(mProcess);
+ if (!mProcess) {
+ return false;
+ }
+
+ MOZ_ASSERT(aPrevProtFlags);
+ BOOL ok = ::VirtualProtectEx(mProcess, aVAddress, aSize, aProtFlags,
+ reinterpret_cast<PDWORD>(aPrevProtFlags));
+ if (!ok && aPrevProtFlags) {
+ // VirtualProtectEx can fail but still set valid protection flags.
+ // Let's clear those upon failure.
+ *aPrevProtFlags = 0;
+ }
+
+ return !!ok;
+ }
+
+ /**
+ * @return true if the page that hosts aVAddress is accessible.
+ */
+ bool IsPageAccessible(uintptr_t aVAddress) const {
+ MEMORY_BASIC_INFORMATION mbi;
+ SIZE_T result = nt::VirtualQueryEx(
+ mProcess, reinterpret_cast<LPCVOID>(aVAddress), &mbi, sizeof(mbi));
+
+ return result && mbi.AllocationProtect && mbi.State == MEM_COMMIT &&
+ mbi.Protect != PAGE_NOACCESS;
+ }
+
+ bool FlushInstructionCache() const {
+ return !!::FlushInstructionCache(mProcess, nullptr, 0);
+ }
+
+ static DWORD GetTrampWriteProtFlags() { return PAGE_READWRITE; }
+
+#if defined(_M_X64)
+ bool IsTrampolineSpaceInLowest2GB() const {
+ return (GetRemoteView() + mReservationSize) <= 0x0000000080000000ULL;
+ }
+
+ // TODO: We should also implement unwind info for our out-of-process policy.
+ static constexpr bool kSupportsUnwindInfo = false;
+
+ inline mozilla::UniquePtr<uint8_t[]> LookupUnwindInfo(
+ uintptr_t aOrigFuncAddr, uint32_t* aOffsetFromBeginAddr,
+ uint32_t* aOffsetToEndAddr, uintptr_t* aOrigImageBase) const {
+ return nullptr;
+ }
+
+ inline bool AddFunctionTable(uintptr_t aNewTable, uint32_t aEntryCount,
+ uintptr_t aBaseAddress) const {
+ return false;
+ }
+#endif // defined(_M_X64)
+
+ protected:
+ uint8_t* GetLocalView() const { return mLocalView; }
+
+ uintptr_t GetRemoteView() const {
+ return reinterpret_cast<uintptr_t>(mRemoteView);
+ }
+
+ /**
+ * @return the effective number of bytes reserved, or 0 on failure
+ */
+ uint32_t Reserve(const uint32_t aSize,
+ const Maybe<Span<const uint8_t>>& aBounds) {
+ if (!aSize || !mProcess) {
+ SetLastDetourError(MMPOLICY_RESERVE_INVALIDARG);
+ return 0;
+ }
+
+ if (mRemoteView) {
+ MOZ_ASSERT(mReservationSize >= aSize);
+ SetLastDetourError(MMPOLICY_RESERVE_ZERO_RESERVATIONSIZE);
+ return mReservationSize;
+ }
+
+ mReservationSize = ComputeAllocationSize(aSize);
+
+ mMapping = ::CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr,
+ PAGE_EXECUTE_READWRITE | SEC_RESERVE, 0,
+ mReservationSize, nullptr);
+ if (!mMapping) {
+ SetLastDetourError(MMPOLICY_RESERVE_CREATEFILEMAPPING, ::GetLastError());
+ return 0;
+ }
+
+ mLocalView = static_cast<uint8_t*>(
+ ::MapViewOfFile(mMapping, FILE_MAP_WRITE, 0, 0, 0));
+ if (!mLocalView) {
+ SetLastDetourError(MMPOLICY_RESERVE_MAPVIEWOFFILE, ::GetLastError());
+ return 0;
+ }
+
+ auto reserveFn = [mapping = mMapping](HANDLE aProcess, PVOID aBase,
+ uint32_t aSize) -> PVOID {
+ return mozilla::MapRemoteViewOfFile(mapping, aProcess, 0ULL, aBase, 0, 0,
+ PAGE_EXECUTE_READ);
+ };
+
+ auto reserveWithinRangeFn =
+ [mapping = mMapping](HANDLE aProcess, uint32_t aSize,
+ const uint8_t* aRangeMin,
+ const uint8_t* aRangeMaxExcl) -> Maybe<PVOID> {
+ static const StaticDynamicallyLinkedFunctionPtr<
+ decltype(&::MapViewOfFile3)>
+ pMapViewOfFile3(L"kernelbase.dll", "MapViewOfFile3");
+ if (!pMapViewOfFile3) {
+ return Nothing();
+ }
+
+ // NB: MEM_ADDRESS_REQUIREMENTS::HighestEndingAddress is *inclusive*
+ MEM_ADDRESS_REQUIREMENTS memReq = {
+ const_cast<uint8_t*>(aRangeMin),
+ const_cast<uint8_t*>(aRangeMaxExcl - 1)};
+
+ MEM_EXTENDED_PARAMETER memParam = {};
+ memParam.Type = MemExtendedParameterAddressRequirements;
+ memParam.Pointer = &memReq;
+
+ return Some(pMapViewOfFile3(mapping, aProcess, nullptr, 0, aSize, 0,
+ PAGE_EXECUTE_READ, &memParam, 1));
+ };
+
+ mRemoteView = MMPolicyBase::Reserve(mProcess, mReservationSize, reserveFn,
+ reserveWithinRangeFn, aBounds);
+ if (!mRemoteView) {
+ return 0;
+ }
+
+ return mReservationSize;
+ }
+
+ bool MaybeCommitNextPage(const uint32_t aRequestedOffset,
+ const uint32_t aRequestedLength) {
+ if (!(*this)) {
+ return false;
+ }
+
+ uint32_t limit = aRequestedOffset + aRequestedLength - 1;
+ if (limit < mCommitOffset) {
+ // No commit required
+ return true;
+ }
+
+ MOZ_DIAGNOSTIC_ASSERT(mCommitOffset < mReservationSize);
+ if (mCommitOffset >= mReservationSize) {
+ return false;
+ }
+
+ PVOID local = ::VirtualAlloc(mLocalView + mCommitOffset, GetPageSize(),
+ MEM_COMMIT, PAGE_READWRITE);
+ if (!local) {
+ return false;
+ }
+
+ PVOID remote = ::VirtualAllocEx(
+ mProcess, static_cast<uint8_t*>(mRemoteView) + mCommitOffset,
+ GetPageSize(), MEM_COMMIT, PAGE_EXECUTE_READ);
+ if (!remote) {
+ return false;
+ }
+
+ mCommitOffset += GetPageSize();
+ return true;
+ }
+
+ private:
+ void Destroy() {
+ // We always leak the remote view
+ if (mLocalView) {
+ ::UnmapViewOfFile(mLocalView);
+ mLocalView = nullptr;
+ }
+
+ if (mMapping) {
+ ::CloseHandle(mMapping);
+ mMapping = nullptr;
+ }
+
+ if (mProcess) {
+ ::CloseHandle(mProcess);
+ mProcess = nullptr;
+ }
+ }
+
+ private:
+ HANDLE mProcess;
+ HANDLE mMapping;
+ uint8_t* mLocalView;
+ PVOID mRemoteView;
+ uint32_t mReservationSize;
+ uint32_t mCommitOffset;
+
+ static const DWORD kAccessFlags = PROCESS_QUERY_INFORMATION |
+ PROCESS_VM_OPERATION | PROCESS_VM_READ |
+ PROCESS_VM_WRITE;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_MMPolicies_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/PatcherBase.h b/toolkit/xre/dllservices/mozglue/interceptor/PatcherBase.h
new file mode 100644
index 0000000000..e39a38fafd
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/PatcherBase.h
@@ -0,0 +1,141 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_PatcherBase_h
+#define mozilla_interceptor_PatcherBase_h
+
+#include "mozilla/interceptor/TargetFunction.h"
+
+namespace mozilla {
+namespace interceptor {
+
+template <typename MMPolicy>
+struct GetProcAddressSelector;
+
+template <>
+struct GetProcAddressSelector<MMPolicyOutOfProcess> {
+ FARPROC operator()(HMODULE aModule, const char* aName,
+ const MMPolicyOutOfProcess& aMMPolicy) const {
+ auto exportSection =
+ mozilla::nt::PEExportSection<MMPolicyOutOfProcess>::Get(aModule,
+ aMMPolicy);
+ return exportSection.GetProcAddress(aName);
+ }
+};
+
+template <>
+struct GetProcAddressSelector<MMPolicyInProcess> {
+ FARPROC operator()(HMODULE aModule, const char* aName,
+ const MMPolicyInProcess&) const {
+ // PEExportSection works for MMPolicyInProcess, too, but the native
+ // GetProcAddress is still better because PEExportSection does not
+    // resolve forwarded entries.
+ return ::GetProcAddress(aModule, aName);
+ }
+};
+
+template <typename VMPolicy>
+class WindowsDllPatcherBase {
+ protected:
+ typedef typename VMPolicy::MMPolicyT MMPolicyT;
+
+ template <typename... Args>
+ explicit WindowsDllPatcherBase(Args&&... aArgs)
+ : mVMPolicy(std::forward<Args>(aArgs)...) {}
+
+ ReadOnlyTargetFunction<MMPolicyT> ResolveRedirectedAddress(
+ FARPROC aOriginalFunction) {
+ uintptr_t currAddr = reinterpret_cast<uintptr_t>(aOriginalFunction);
+
+#if defined(_M_IX86) || defined(_M_X64)
+ uintptr_t prevAddr = 0;
+ while (prevAddr != currAddr) {
+ ReadOnlyTargetFunction<MMPolicyT> currFunc(mVMPolicy, currAddr);
+ prevAddr = currAddr;
+
+      // If the function entry is a jmp rel8 stub to the internal
+      // implementation, we resolve the redirected address from the jump
+      // target.
+ uintptr_t nextAddr = 0;
+ if (currFunc.IsRelativeShortJump(&nextAddr)) {
+ int8_t offset = nextAddr - currFunc.GetAddress() - 2;
+
+# if defined(_M_X64)
+ // We redirect to the target of a short jump backwards if the target
+ // is another jump (only 32-bit displacement is currently supported).
+ // This case is used by GetFileAttributesW in Win7 x64.
+ if ((offset < 0) && (currFunc.IsValidAtOffset(2 + offset))) {
+ ReadOnlyTargetFunction<MMPolicyT> redirectFn(mVMPolicy, nextAddr);
+ if (redirectFn.IsIndirectNearJump(&nextAddr)) {
+ return redirectFn;
+ }
+ }
+# endif
+
+        // We check that the downstream has enough nop space only when the
+        // offset is positive. Otherwise we stop chasing redirects and let the
+        // caller fail to hook.
+ if (offset > 0) {
+ bool isNopSpace = true;
+ for (int8_t i = 0; i < offset; i++) {
+ if (currFunc[2 + i] != 0x90) {
+ isNopSpace = false;
+ break;
+ }
+ }
+
+ if (isNopSpace) {
+ currAddr = nextAddr;
+ }
+ }
+# if defined(_M_X64)
+ } else if (currFunc.IsIndirectNearJump(&nextAddr) ||
+ currFunc.IsRelativeNearJump(&nextAddr)) {
+# else
+ } else if (currFunc.IsIndirectNearJump(&nextAddr)) {
+# endif
+        // If the function entry is jmp [disp32], as used by kernel32, we
+        // resolve the redirected address from the import table. For x64, we
+        // also resolve a relative near jump, which TestDllInterceptor uses
+        // when built with --disable-optimize.
+ currAddr = nextAddr;
+ }
+ }
+#endif // defined(_M_IX86) || defined(_M_X64)
+
+ if (currAddr != reinterpret_cast<uintptr_t>(aOriginalFunction) &&
+ !mVMPolicy.IsPageAccessible(currAddr)) {
+ currAddr = reinterpret_cast<uintptr_t>(aOriginalFunction);
+ }
+ return ReadOnlyTargetFunction<MMPolicyT>(mVMPolicy, currAddr);
+ }
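+
+  // Illustrative case of the short-jump branch above: a target whose entry is
+  // "EB 06" (jmp rel8, forward by 6) followed by six 0x90 nop bytes resolves
+  // to entry + 8, where the real implementation begins. If any of those six
+  // bytes is not a nop, the redirect is not followed.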
+
+ public:
+ FARPROC GetProcAddress(HMODULE aModule, const char* aName) const {
+ GetProcAddressSelector<MMPolicyT> selector;
+ return selector(aModule, aName, mVMPolicy);
+ }
+
+ bool IsPageAccessible(uintptr_t aAddress) const {
+ return mVMPolicy.IsPageAccessible(aAddress);
+ }
+
+#if defined(NIGHTLY_BUILD)
+ const Maybe<DetourError>& GetLastDetourError() const {
+ return mVMPolicy.GetLastDetourError();
+ }
+#endif // defined(NIGHTLY_BUILD)
+ template <typename... Args>
+ void SetLastDetourError(Args&&... aArgs) {
+ mVMPolicy.SetLastDetourError(std::forward<Args>(aArgs)...);
+ }
+
+ protected:
+ VMPolicy mVMPolicy;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_PatcherBase_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/PatcherDetour.h b/toolkit/xre/dllservices/mozglue/interceptor/PatcherDetour.h
new file mode 100644
index 0000000000..e0b33c7add
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/PatcherDetour.h
@@ -0,0 +1,1739 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_PatcherDetour_h
+#define mozilla_interceptor_PatcherDetour_h
+
+#if defined(_M_ARM64)
+# include "mozilla/interceptor/Arm64.h"
+#endif // defined(_M_ARM64)
+#include <utility>
+
+#include "mozilla/Maybe.h"
+#include "mozilla/NativeNt.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TypedEnumBits.h"
+#include "mozilla/Types.h"
+#include "mozilla/Unused.h"
+#include "mozilla/interceptor/PatcherBase.h"
+#include "mozilla/interceptor/Trampoline.h"
+#include "mozilla/interceptor/VMSharingPolicies.h"
+
+#define COPY_CODES(NBYTES) \
+ do { \
+ tramp.CopyCodes(origBytes.GetAddress(), NBYTES); \
+ origBytes += NBYTES; \
+ } while (0)
+
+namespace mozilla {
+namespace interceptor {
+
+enum class DetourFlags : uint32_t {
+ eDefault = 0,
+ eEnable10BytePatch = 1, // Allow 10-byte patches when conditions allow
+ eTestOnlyForceShortPatch =
+ 2, // Force short patches at all times (x86-64 and arm64 testing only)
+ eDontResolveRedirection =
+ 4, // Don't resolve the redirection of JMP (e.g. kernel32 -> kernelbase)
+};
+
+MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DetourFlags)
+
+// This class is responsible for tasks which depend only on MMPolicy,
+// decoupled from VMPolicy. We already have WindowsDllPatcherBase, but that
+// class depends on VMPolicy because it holds a VMPolicy instance as a member.
+template <typename MMPolicyT>
+class WindowsDllDetourPatcherPrimitive {
+ protected:
+#if defined(_M_ARM64)
+ // LDR x16, .+8
+ static const uint32_t kLdrX16Plus8 = 0x58000050U;
+#endif // defined(_M_ARM64)
+
+ static void ApplyDefaultPatch(WritableTargetFunction<MMPolicyT>& target,
+ intptr_t aDest) {
+#if defined(_M_IX86)
+ target.WriteByte(0xe9); // jmp
+ target.WriteDisp32(aDest); // hook displacement
+#elif defined(_M_X64)
+ // mov r11, address
+ target.WriteByte(0x49);
+ target.WriteByte(0xbb);
+ target.WritePointer(aDest);
+
+ // jmp r11
+ target.WriteByte(0x41);
+ target.WriteByte(0xff);
+ target.WriteByte(0xe3);
+#elif defined(_M_ARM64)
+ // The default patch requires 16 bytes
+ // LDR x16, .+8
+ target.WriteLong(kLdrX16Plus8);
+ // BR x16
+ target.WriteLong(arm64::BuildUnconditionalBranchToRegister(16));
+ target.WritePointer(aDest);
+#else
+# error "Unsupported processor architecture"
+#endif
+ }
+
+ public:
+ constexpr static uint32_t GetWorstCaseRequiredBytesToPatch() {
+#if defined(_M_IX86)
+ return 5;
+#elif defined(_M_X64)
+ return 13;
+#elif defined(_M_ARM64)
+ return 16;
+#else
+# error "Unsupported processor architecture"
+#endif
+ }
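+
+  // The worst-case sizes above correspond to the byte sequences emitted by
+  // ApplyDefaultPatch (illustrative):
+  //   x86:   E9 <rel32>                    (5 bytes)
+  //   x64:   49 BB <imm64> 41 FF E3        (mov r11, imm64; jmp r11; 13 bytes)
+  //   arm64: LDR x16, .+8; BR x16; <imm64> (16 bytes)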
+
+ WindowsDllDetourPatcherPrimitive() = default;
+
+ WindowsDllDetourPatcherPrimitive(const WindowsDllDetourPatcherPrimitive&) =
+ delete;
+ WindowsDllDetourPatcherPrimitive(WindowsDllDetourPatcherPrimitive&&) = delete;
+ WindowsDllDetourPatcherPrimitive& operator=(
+ const WindowsDllDetourPatcherPrimitive&) = delete;
+ WindowsDllDetourPatcherPrimitive& operator=(
+ WindowsDllDetourPatcherPrimitive&&) = delete;
+
+ bool AddIrreversibleHook(const MMPolicyT& aMMPolicy, FARPROC aTargetFn,
+ intptr_t aHookDest) {
+ ReadOnlyTargetFunction<MMPolicyT> targetReadOnly(aMMPolicy, aTargetFn);
+
+ WritableTargetFunction<MMPolicyT> targetWritable(
+ targetReadOnly.Promote(GetWorstCaseRequiredBytesToPatch()));
+ if (!targetWritable) {
+ return false;
+ }
+
+ ApplyDefaultPatch(targetWritable, aHookDest);
+
+ return targetWritable.Commit();
+ }
+};
+
+template <typename VMPolicy>
+class WindowsDllDetourPatcher final
+ : public WindowsDllDetourPatcherPrimitive<typename VMPolicy::MMPolicyT>,
+ public WindowsDllPatcherBase<VMPolicy> {
+ using MMPolicyT = typename VMPolicy::MMPolicyT;
+ using TrampPoolT = typename VMPolicy::PoolType;
+ using PrimitiveT = WindowsDllDetourPatcherPrimitive<MMPolicyT>;
+ Maybe<DetourFlags> mFlags;
+
+ public:
+ template <typename... Args>
+ explicit WindowsDllDetourPatcher(Args&&... aArgs)
+ : WindowsDllPatcherBase<VMPolicy>(std::forward<Args>(aArgs)...) {}
+
+ ~WindowsDllDetourPatcher() { Clear(); }
+
+ WindowsDllDetourPatcher(const WindowsDllDetourPatcher&) = delete;
+ WindowsDllDetourPatcher(WindowsDllDetourPatcher&&) = delete;
+ WindowsDllDetourPatcher& operator=(const WindowsDllDetourPatcher&) = delete;
+ WindowsDllDetourPatcher& operator=(WindowsDllDetourPatcher&&) = delete;
+
+ void Clear() {
+ if (!this->mVMPolicy.ShouldUnhookUponDestruction()) {
+ return;
+ }
+
+#if defined(_M_IX86)
+ size_t nBytes = 1 + sizeof(intptr_t);
+#elif defined(_M_X64)
+ size_t nBytes = 2 + sizeof(intptr_t);
+#elif defined(_M_ARM64)
+ size_t nBytes = 2 * sizeof(uint32_t) + sizeof(uintptr_t);
+#else
+# error "Unknown processor type"
+#endif
+
+ const auto& tramps = this->mVMPolicy.Items();
+ for (auto&& tramp : tramps) {
+ // First we read the pointer to the interceptor instance.
+ Maybe<uintptr_t> instance = tramp.ReadEncodedPointer();
+ if (!instance) {
+ continue;
+ }
+
+ if (instance.value() != reinterpret_cast<uintptr_t>(this)) {
+ // tramp does not belong to this interceptor instance.
+ continue;
+ }
+
+ auto clearInstance = MakeScopeExit([&tramp]() -> void {
+ // Clear the instance pointer so that no future instances with the same
+ // |this| pointer will attempt to reset its hook.
+ tramp.Rewind();
+ tramp.WriteEncodedPointer(nullptr);
+ });
+
+ // Now we read the pointer to the intercepted function.
+ Maybe<uintptr_t> interceptedFn = tramp.ReadEncodedPointer();
+ if (!interceptedFn) {
+ continue;
+ }
+
+ WritableTargetFunction<MMPolicyT> origBytes(
+ this->mVMPolicy, interceptedFn.value(), nBytes);
+ if (!origBytes) {
+ continue;
+ }
+
+#if defined(_M_IX86) || defined(_M_X64)
+
+ Maybe<uint8_t> maybeOpcode1 = origBytes.ReadByte();
+ if (!maybeOpcode1) {
+ continue;
+ }
+
+ uint8_t opcode1 = maybeOpcode1.value();
+
+# if defined(_M_IX86)
+ // Ensure the JMP from CreateTrampoline is where we expect it to be.
+ MOZ_ASSERT(opcode1 == 0xE9);
+ if (opcode1 != 0xE9) {
+ continue;
+ }
+
+ intptr_t startOfTrampInstructions =
+ static_cast<intptr_t>(tramp.GetCurrentRemoteAddress());
+
+ origBytes.WriteDisp32(startOfTrampInstructions);
+ if (!origBytes) {
+ continue;
+ }
+
+ origBytes.Commit();
+# elif defined(_M_X64)
+ // Note: At the moment we clear 13-byte patches by replacing the jump to
+ // the patched function by a jump to the stub code. The original
+ // bytes of the original function are *not* restored. This implies
+ // that the stub code outlives our cleaning, so unwind information
+ // remains useful and must not be removed here.
+ if (opcode1 == 0x49) {
+ if (!Clear13BytePatch(origBytes, tramp.GetCurrentRemoteAddress())) {
+ continue;
+ }
+ } else if (opcode1 == 0xB8) {
+ if (!Clear10BytePatch(origBytes)) {
+ continue;
+ }
+ } else if (opcode1 == 0x48) {
+ // The original function was just a different trampoline
+ if (!ClearTrampolinePatch(origBytes, tramp.GetCurrentRemoteAddress())) {
+ continue;
+ }
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized patch!");
+ continue;
+ }
+# endif
+
+#elif defined(_M_ARM64)
+
+ // Ensure that we see the instruction that we expect
+ Maybe<uint32_t> inst1 = origBytes.ReadLong();
+ if (!inst1) {
+ continue;
+ }
+
+ if (inst1.value() == this->kLdrX16Plus8) {
+ if (!Clear16BytePatch(origBytes, tramp.GetCurrentRemoteAddress())) {
+ continue;
+ }
+ } else if (arm64::IsUnconditionalBranchImm(inst1.value())) {
+ if (!Clear4BytePatch(inst1.value(), origBytes)) {
+ continue;
+ }
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized patch!");
+ continue;
+ }
+
+#else
+# error "Unknown processor type"
+#endif
+ }
+
+ this->mVMPolicy.Clear();
+ }
+
+#if defined(_M_X64)
+ bool Clear13BytePatch(WritableTargetFunction<MMPolicyT>& aOrigBytes,
+ const uintptr_t aResetToAddress) {
+ Maybe<uint8_t> maybeOpcode2 = aOrigBytes.ReadByte();
+ if (!maybeOpcode2) {
+ return false;
+ }
+
+ uint8_t opcode2 = maybeOpcode2.value();
+ if (opcode2 != 0xBB) {
+ return false;
+ }
+
+ aOrigBytes.WritePointer(aResetToAddress);
+ if (!aOrigBytes) {
+ return false;
+ }
+
+ return aOrigBytes.Commit();
+ }
+
+ bool ClearTrampolinePatch(WritableTargetFunction<MMPolicyT>& aOrigBytes,
+ const uintptr_t aPtrToResetToAddress) {
+ // The target of the trampoline we replaced is stored at
+ // aPtrToResetToAddress. We simply put it back where we got it from.
+ Maybe<uint8_t> maybeOpcode2 = aOrigBytes.ReadByte();
+ if (!maybeOpcode2) {
+ return false;
+ }
+
+ uint8_t opcode2 = maybeOpcode2.value();
+ if (opcode2 != 0xB8) {
+ return false;
+ }
+
+ auto oldPtr = *(reinterpret_cast<const uintptr_t*>(aPtrToResetToAddress));
+
+ aOrigBytes.WritePointer(oldPtr);
+ if (!aOrigBytes) {
+ return false;
+ }
+
+ return aOrigBytes.Commit();
+ }
+
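+  // Undo a 10-byte patch (see Apply10BytePatch: "mov eax, imm32; movsxd rax,
+  // eax; jmp rax"). The caller has already consumed the leading 0xB8, so the
+  // next four bytes are the 32-bit pointer to the intermediate trampoline.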
+ bool Clear10BytePatch(WritableTargetFunction<MMPolicyT>& aOrigBytes) {
+ Maybe<uint32_t> maybePtr32 = aOrigBytes.ReadLong();
+ if (!maybePtr32) {
+ return false;
+ }
+
+ uint32_t ptr32 = maybePtr32.value();
+ // We expect the high bit to be clear
+ if (ptr32 & 0x80000000) {
+ return false;
+ }
+
+ uintptr_t trampPtr = ptr32;
+
+ // trampPtr points to an intermediate trampoline that contains a 13-byte
+ // patch. We back up by sizeof(uintptr_t) so that we can access the pointer
+ // to the stub trampoline.
+ WritableTargetFunction<MMPolicyT> writableIntermediate(
+ this->mVMPolicy, trampPtr - sizeof(uintptr_t), 13 + sizeof(uintptr_t));
+ if (!writableIntermediate) {
+ return false;
+ }
+
+ Maybe<uintptr_t> stubTramp = writableIntermediate.ReadEncodedPtr();
+ if (!stubTramp || !stubTramp.value()) {
+ return false;
+ }
+
+ Maybe<uint8_t> maybeOpcode1 = writableIntermediate.ReadByte();
+ if (!maybeOpcode1) {
+ return false;
+ }
+
+ // We expect this opcode to be the beginning of our normal mov r11, ptr
+ // patch sequence.
+ uint8_t opcode1 = maybeOpcode1.value();
+ if (opcode1 != 0x49) {
+ return false;
+ }
+
+ // Now we can just delegate the rest to our normal 13-byte patch clearing.
+ return Clear13BytePatch(writableIntermediate, stubTramp.value());
+ }
+#endif // defined(_M_X64)
+
+#if defined(_M_ARM64)
+ bool Clear4BytePatch(const uint32_t aBranchImm,
+ WritableTargetFunction<MMPolicyT>& aOrigBytes) {
+ MOZ_ASSERT(arm64::IsUnconditionalBranchImm(aBranchImm));
+
+ arm64::LoadOrBranch decoded = arm64::BUncondImmDecode(
+ aOrigBytes.GetCurrentAddress() - sizeof(uint32_t), aBranchImm);
+
+ uintptr_t trampPtr = decoded.mAbsAddress;
+
+ // trampPtr points to an intermediate trampoline that contains a veneer.
+ // We back up by sizeof(uintptr_t) so that we can access the pointer to the
+ // stub trampoline.
+
+ // We want trampLen to be the size of the veneer, plus one pointer (since
+ // we are backing up trampPtr by one pointer)
+ size_t trampLen = 16 + sizeof(uintptr_t);
+
+ WritableTargetFunction<MMPolicyT> writableIntermediate(
+ this->mVMPolicy, trampPtr - sizeof(uintptr_t), trampLen);
+ if (!writableIntermediate) {
+ return false;
+ }
+
+ Maybe<uintptr_t> stubTramp = writableIntermediate.ReadEncodedPtr();
+ if (!stubTramp || !stubTramp.value()) {
+ return false;
+ }
+
+ Maybe<uint32_t> inst1 = writableIntermediate.ReadLong();
+ if (!inst1 || inst1.value() != this->kLdrX16Plus8) {
+ return false;
+ }
+
+ return Clear16BytePatch(writableIntermediate, stubTramp.value());
+ }
+
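+  // Undo a 16-byte patch ("ldr x16, +8; br x16; <hook address>"). The caller
+  // has already consumed the ldr; we verify the "br x16" and overwrite the
+  // literal with the address to reset the hook to.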
+ bool Clear16BytePatch(WritableTargetFunction<MMPolicyT>& aOrigBytes,
+ const uintptr_t aResetToAddress) {
+ Maybe<uint32_t> inst2 = aOrigBytes.ReadLong();
+ if (!inst2) {
+ return false;
+ }
+
+ if (inst2.value() != arm64::BuildUnconditionalBranchToRegister(16)) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized patch!");
+ return false;
+ }
+
+ // Clobber the pointer to our hook function with a pointer to the
+ // start of the trampoline.
+ aOrigBytes.WritePointer(aResetToAddress);
+ aOrigBytes.Commit();
+
+ return true;
+ }
+#endif // defined(_M_ARM64)
+
+ void Init(DetourFlags aFlags = DetourFlags::eDefault) {
+ if (Initialized()) {
+ return;
+ }
+
+#if defined(_M_X64)
+ if (aFlags & DetourFlags::eTestOnlyForceShortPatch) {
+ aFlags |= DetourFlags::eEnable10BytePatch;
+ }
+#endif // defined(_M_X64)
+
+ mFlags = Some(aFlags);
+ }
+
+ bool Initialized() const { return mFlags.isSome(); }
+
+ bool AddHook(FARPROC aTargetFn, intptr_t aHookDest, void** aOrigFunc) {
+ ReadOnlyTargetFunction<MMPolicyT> target(
+ (mFlags.value() & DetourFlags::eDontResolveRedirection)
+ ? ReadOnlyTargetFunction<MMPolicyT>(
+ this->mVMPolicy, reinterpret_cast<uintptr_t>(aTargetFn))
+ : this->ResolveRedirectedAddress(aTargetFn));
+
+ TrampPoolT* trampPool = nullptr;
+
+#if defined(_M_ARM64)
+ // ARM64 uses two passes to build its trampoline. The first pass uses a
+ // null tramp to determine how many bytes are needed. Once that is known,
+ // CreateTrampoline calls itself recursively with a "real" tramp.
+ Trampoline<MMPolicyT> tramp(nullptr);
+#else
+ Maybe<TrampPoolT> maybeTrampPool = DoReserve();
+ MOZ_ASSERT(maybeTrampPool);
+ if (!maybeTrampPool) {
+ return false;
+ }
+
+ trampPool = maybeTrampPool.ptr();
+
+ Maybe<Trampoline<MMPolicyT>> maybeTramp(trampPool->GetNextTrampoline());
+ if (!maybeTramp) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_NEXT_TRAMPOLINE_ERROR);
+ return false;
+ }
+
+ Trampoline<MMPolicyT> tramp(std::move(maybeTramp.ref()));
+#endif
+
+ CreateTrampoline(target, trampPool, tramp, aHookDest, aOrigFunc);
+ if (!*aOrigFunc) {
+ return false;
+ }
+
+ return true;
+ }
+
+ private:
+ /**
+ * This function returns a maximum distance that can be reached by a single
+ * unconditional jump instruction. This is dependent on the processor ISA.
+ * Note that this distance is *exclusive* when added to the pivot, so the
+ * distance returned by this function is actually
+ * (maximum_absolute_offset + 1).
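+   * For example, an x86/x64 rel32 jmp can reach [pivot - 0x80000000,
+   * pivot + 0x7FFFFFFF], so the value returned below is 0x80000000.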
+ */
+ static uint32_t GetDefaultPivotDistance() {
+#if defined(_M_ARM64)
+ // Immediate unconditional branch allows for +/- 128MB
+ return 0x08000000U;
+#elif defined(_M_IX86) || defined(_M_X64)
+ // For these ISAs, our distance will assume the use of an unconditional jmp
+ // with a 32-bit signed displacement.
+ return 0x80000000U;
+#else
+# error "Not defined for this processor arch"
+#endif
+ }
+
+ /**
+ * If we're reserving trampoline space for a specific module, we base the
+ * pivot off of the median address of the module's .text section. While this
+   * may not be precise, it should be accurate enough for our purpose:
+   * ensuring that the trampoline space is reachable by any executable code
+   * in the module.
+ */
+ Maybe<TrampPoolT> ReserveForModule(HMODULE aModule) {
+ nt::PEHeaders moduleHeaders(aModule);
+ if (!moduleHeaders) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_RESERVE_FOR_MODULE_PE_ERROR);
+ return Nothing();
+ }
+
+ Maybe<Span<const uint8_t>> textSectionInfo =
+ moduleHeaders.GetTextSectionInfo();
+ if (!textSectionInfo) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_RESERVE_FOR_MODULE_TEXT_ERROR);
+ return Nothing();
+ }
+
+ const uint8_t* median = textSectionInfo.value().data() +
+ (textSectionInfo.value().LengthBytes() / 2);
+
+ Maybe<TrampPoolT> maybeTrampPool = this->mVMPolicy.Reserve(
+ reinterpret_cast<uintptr_t>(median), GetDefaultPivotDistance());
+ if (!maybeTrampPool) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_RESERVE_FOR_MODULE_RESERVE_ERROR);
+ }
+ return maybeTrampPool;
+ }
+
+ Maybe<TrampPoolT> DoReserve(HMODULE aModule = nullptr) {
+ if (aModule) {
+ return ReserveForModule(aModule);
+ }
+
+ uintptr_t pivot = 0;
+ uint32_t distance = 0;
+
+#if defined(_M_X64)
+ if (mFlags.value() & DetourFlags::eEnable10BytePatch) {
+ // We must stay below the 2GB mark because a 10-byte patch uses movsxd
+      // (i.e., sign extension) to expand the pointer to 64 bits, so bit 31 of any
+ // pointers into the reserved region must be 0.
+ pivot = 0x40000000U;
+ distance = 0x40000000U;
+ }
+#endif // defined(_M_X64)
+
+ Maybe<TrampPoolT> maybeTrampPool = this->mVMPolicy.Reserve(pivot, distance);
+#if defined(NIGHTLY_BUILD)
+ if (!maybeTrampPool && this->GetLastDetourError().isNothing()) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_DO_RESERVE_ERROR);
+ }
+#endif // defined(NIGHTLY_BUILD)
+ return maybeTrampPool;
+ }
+
+ protected:
+#if !defined(_M_ARM64)
+
+ const static int kPageSize = 4096;
+
+ // rex bits
+ static const BYTE kMaskHighNibble = 0xF0;
+ static const BYTE kRexOpcode = 0x40;
+ static const BYTE kMaskRexW = 0x08;
+ static const BYTE kMaskRexR = 0x04;
+ static const BYTE kMaskRexX = 0x02;
+ static const BYTE kMaskRexB = 0x01;
+
+ // mod r/m bits
+ static const BYTE kRegFieldShift = 3;
+ static const BYTE kMaskMod = 0xC0;
+ static const BYTE kMaskReg = 0x38;
+ static const BYTE kMaskRm = 0x07;
+ static const BYTE kRmNeedSib = 0x04;
+ static const BYTE kModReg = 0xC0;
+ static const BYTE kModDisp32 = 0x80;
+ static const BYTE kModDisp8 = 0x40;
+ static const BYTE kModNoRegDisp = 0x00;
+ static const BYTE kRmNoRegDispDisp32 = 0x05;
+
+ // sib bits
+ static const BYTE kMaskSibScale = 0xC0;
+ static const BYTE kMaskSibIndex = 0x38;
+ static const BYTE kMaskSibBase = 0x07;
+ static const BYTE kSibBaseEbp = 0x05;
+
+ // Register bit IDs.
+ static const BYTE kRegAx = 0x0;
+ static const BYTE kRegCx = 0x1;
+ static const BYTE kRegDx = 0x2;
+ static const BYTE kRegBx = 0x3;
+ static const BYTE kRegSp = 0x4;
+ static const BYTE kRegBp = 0x5;
+ static const BYTE kRegSi = 0x6;
+ static const BYTE kRegDi = 0x7;
+
+ // Special ModR/M codes. These indicate operands that cannot be simply
+ // memcpy-ed.
+ // Operand is a 64-bit RIP-relative address.
+ static const int kModOperand64 = -2;
+ // Operand is not yet handled by our trampoline.
+ static const int kModUnknown = -1;
+
+ /**
+ * Returns the number of bytes taken by the ModR/M byte, SIB (if present)
+ * and the instruction's operand. In special cases, the special MODRM codes
+ * above are returned.
+ * aModRm points to the ModR/M byte of the instruction.
+ * On return, aSubOpcode (if present) is filled with the subopcode/register
+ * code found in the ModR/M byte.
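+   * For example, given "8B 45 08" (mov eax, [ebp+8] on x86), aModRm points
+   * at 0x45: mod is disp8 and r/m is 101, so this returns 2 (ModR/M + disp8)
+   * and sets aSubOpcode to 0 (the reg field).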
+ */
+ int CountModRmSib(const ReadOnlyTargetFunction<MMPolicyT>& aModRm,
+ BYTE* aSubOpcode = nullptr) {
+ int numBytes = 1; // Start with 1 for mod r/m byte itself
+ switch (*aModRm & kMaskMod) {
+ case kModReg:
+ return numBytes;
+ case kModDisp8:
+ numBytes += 1;
+ break;
+ case kModDisp32:
+ numBytes += 4;
+ break;
+ case kModNoRegDisp:
+ if ((*aModRm & kMaskRm) == kRmNoRegDispDisp32) {
+# if defined(_M_X64)
+ if (aSubOpcode) {
+ *aSubOpcode = (*aModRm & kMaskReg) >> kRegFieldShift;
+ }
+ return kModOperand64;
+# else
+ // On IA-32, all ModR/M instruction modes address memory relative to 0
+ numBytes += 4;
+# endif
+ } else if (((*aModRm & kMaskRm) == kRmNeedSib &&
+ (*(aModRm + 1) & kMaskSibBase) == kSibBaseEbp)) {
+ numBytes += 4;
+ }
+ break;
+ default:
+ // This should not be reachable
+ MOZ_ASSERT_UNREACHABLE("Impossible value for modr/m byte mod bits");
+ return kModUnknown;
+ }
+ if ((*aModRm & kMaskRm) == kRmNeedSib) {
+ // SIB byte
+ numBytes += 1;
+ }
+ if (aSubOpcode) {
+ *aSubOpcode = (*aModRm & kMaskReg) >> kRegFieldShift;
+ }
+ return numBytes;
+ }
+
+# if defined(_M_X64)
+ enum class JumpType{Je, Jne, Jae, Jmp, Call};
+
+ static bool GenerateJump(Trampoline<MMPolicyT>& aTramp,
+ uintptr_t aAbsTargetAddress, const JumpType aType) {
+ // Near call, absolute indirect, address given in r/m32
+ if (aType == JumpType::Call) {
+ // CALL [RIP+0]
+ aTramp.WriteByte(0xff);
+ aTramp.WriteByte(0x15);
+ // The offset to jump destination -- 2 bytes after the current position.
+ aTramp.WriteInteger(2);
+ aTramp.WriteByte(0xeb); // JMP + 8 (jump over target address)
+ aTramp.WriteByte(8);
+ aTramp.WritePointer(aAbsTargetAddress);
+ return !!aTramp;
+ }
+
+ // Write an opposite conditional jump because the destination branches
+ // are swapped.
+ if (aType == JumpType::Je) {
+ // JNE RIP+14
+ aTramp.WriteByte(0x75);
+ aTramp.WriteByte(14);
+ } else if (aType == JumpType::Jne) {
+ // JE RIP+14
+ aTramp.WriteByte(0x74);
+ aTramp.WriteByte(14);
+ } else if (aType == JumpType::Jae) {
+ // JB RIP+14
+ aTramp.WriteByte(0x72);
+ aTramp.WriteByte(14);
+ }
+
+ // Near jmp, absolute indirect, address given in r/m32
+ // JMP [RIP+0]
+ aTramp.WriteByte(0xff);
+ aTramp.WriteByte(0x25);
+ // The offset to jump destination is 0
+ aTramp.WriteInteger(0);
+ aTramp.WritePointer(aAbsTargetAddress);
+
+ return !!aTramp;
+ }
+# endif
+
+ enum ePrefixGroupBits{eNoPrefixes = 0, ePrefixGroup1 = (1 << 0),
+ ePrefixGroup2 = (1 << 1), ePrefixGroup3 = (1 << 2),
+ ePrefixGroup4 = (1 << 3)};
+
+ int CountPrefixBytes(const ReadOnlyTargetFunction<MMPolicyT>& aBytes,
+ unsigned char* aOutGroupBits) {
+ unsigned char& groupBits = *aOutGroupBits;
+ groupBits = eNoPrefixes;
+ int index = 0;
+ while (true) {
+ switch (aBytes[index]) {
+ // Group 1
+ case 0xF0: // LOCK
+ case 0xF2: // REPNZ
+ case 0xF3: // REP / REPZ
+ if (groupBits & ePrefixGroup1) {
+ return -1;
+ }
+ groupBits |= ePrefixGroup1;
+ ++index;
+ break;
+
+ // Group 2
+ case 0x2E: // CS override / branch not taken
+ case 0x36: // SS override
+ case 0x3E: // DS override / branch taken
+ case 0x64: // FS override
+ case 0x65: // GS override
+ if (groupBits & ePrefixGroup2) {
+ return -1;
+ }
+ groupBits |= ePrefixGroup2;
+ ++index;
+ break;
+
+ // Group 3
+ case 0x66: // operand size override
+ if (groupBits & ePrefixGroup3) {
+ return -1;
+ }
+ groupBits |= ePrefixGroup3;
+ ++index;
+ break;
+
+ // Group 4
+ case 0x67: // Address size override
+ if (groupBits & ePrefixGroup4) {
+ return -1;
+ }
+ groupBits |= ePrefixGroup4;
+ ++index;
+ break;
+
+ default:
+ return index;
+ }
+ }
+ }
+
+ // Return a ModR/M byte made from the 2 Mod bits, the register used for the
+ // reg bits and the register used for the R/M bits.
+ BYTE BuildModRmByte(BYTE aModBits, BYTE aReg, BYTE aRm) {
+ MOZ_ASSERT((aRm & kMaskRm) == aRm);
+ MOZ_ASSERT((aModBits & kMaskMod) == aModBits);
+ MOZ_ASSERT(((aReg << kRegFieldShift) & kMaskReg) ==
+ (aReg << kRegFieldShift));
+ return aModBits | (aReg << kRegFieldShift) | aRm;
+ }
+
+#endif // !defined(_M_ARM64)
+
+  // If aOriginalFn is a recognized trampoline then patch it to call aDest,
+  // store that trampoline's original target in aTramp and *aOutTramp, and
+  // return true.
+ bool PatchIfTargetIsRecognizedTrampoline(
+ Trampoline<MMPolicyT>& aTramp,
+ ReadOnlyTargetFunction<MMPolicyT>& aOriginalFn, intptr_t aDest,
+ void** aOutTramp) {
+#if defined(_M_X64)
+ // Variation 1:
+ // 48 b8 imm64 mov rax, imm64
+ // ff e0 jmp rax
+ //
+ // Variation 2:
+ // 48 b8 imm64 mov rax, imm64
+ // 50 push rax
+ // c3 ret
+ if ((aOriginalFn[0] == 0x48) && (aOriginalFn[1] == 0xB8) &&
+ ((aOriginalFn[10] == 0xFF && aOriginalFn[11] == 0xE0) ||
+ (aOriginalFn[10] == 0x50 && aOriginalFn[11] == 0xC3))) {
+ uintptr_t originalTarget =
+ (aOriginalFn + 2).template ChasePointer<uintptr_t>();
+
+ // Skip the first two bytes (48 b8) so that we can overwrite the imm64
+ WritableTargetFunction<MMPolicyT> target(aOriginalFn.Promote(8, 2));
+ if (!target) {
+ return false;
+ }
+
+ // Write the new JMP target address.
+ target.WritePointer(aDest);
+ if (!target.Commit()) {
+ return false;
+ }
+
+ // Store the old target address so we can restore it when we're cleared
+ aTramp.WritePointer(originalTarget);
+ if (!aTramp) {
+ return false;
+ }
+
+ *aOutTramp = reinterpret_cast<void*>(originalTarget);
+ return true;
+ }
+#endif // defined(_M_X64)
+
+ return false;
+ }
+
+#if defined(_M_ARM64)
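+  // Apply a 4-byte patch: a single unconditional "b" into a veneer allocated
+  // from the trampoline pool; the veneer then jumps to the far-away aDest.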
+ bool Apply4BytePatch(TrampPoolT* aTrampPool, void* aTrampPtr,
+ WritableTargetFunction<MMPolicyT>& target,
+ intptr_t aDest) {
+ MOZ_ASSERT(aTrampPool);
+ if (!aTrampPool) {
+ return false;
+ }
+
+ uintptr_t hookDest = arm64::MakeVeneer(*aTrampPool, aTrampPtr, aDest);
+ if (!hookDest) {
+ return false;
+ }
+
+ Maybe<uint32_t> branchImm = arm64::BuildUnconditionalBranchImm(
+ target.GetCurrentAddress(), hookDest);
+ if (!branchImm) {
+ return false;
+ }
+
+ target.WriteLong(branchImm.value());
+
+ return true;
+ }
+#endif // defined(_M_ARM64)
+
+#if defined(_M_X64)
+ bool Apply10BytePatch(TrampPoolT* aTrampPool, void* aTrampPtr,
+ WritableTargetFunction<MMPolicyT>& target,
+ intptr_t aDest) {
+ // Note: Even if the target function is also below 2GB, we still use an
+ // intermediary trampoline so that we consistently have a 64-bit pointer
+ // that we can use to reset the trampoline upon interceptor shutdown.
+ Maybe<Trampoline<MMPolicyT>> maybeCallTramp(
+ aTrampPool->GetNextTrampoline());
+ if (!maybeCallTramp) {
+ return false;
+ }
+
+ Trampoline<MMPolicyT> callTramp(std::move(maybeCallTramp.ref()));
+
+ // Write a null instance so that Clear() does not consider this tramp to
+ // be a normal tramp to be torn down.
+ callTramp.WriteEncodedPointer(nullptr);
+ // Use the second pointer slot to store a pointer to the primary tramp
+ callTramp.WriteEncodedPointer(aTrampPtr);
+ callTramp.StartExecutableCode();
+
+ // mov r11, address
+ callTramp.WriteByte(0x49);
+ callTramp.WriteByte(0xbb);
+ callTramp.WritePointer(aDest);
+
+ // jmp r11
+ callTramp.WriteByte(0x41);
+ callTramp.WriteByte(0xff);
+ callTramp.WriteByte(0xe3);
+
+ void* callTrampStart = callTramp.EndExecutableCode();
+ if (!callTrampStart) {
+ return false;
+ }
+
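+    // The 10-byte patch written at the target works out to:
+    //   b8 <imm32>   mov eax, imm32   (low 31 bits of callTrampStart)
+    //   48 63 c0     movsxd rax, eax
+    //   ff e0        jmp rax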
+ target.WriteByte(0xB8); // MOV EAX, IMM32
+
+ // Assert that the topmost 33 bits are 0
+ MOZ_ASSERT(
+ !(reinterpret_cast<uintptr_t>(callTrampStart) & (~0x7FFFFFFFULL)));
+
+ target.WriteLong(static_cast<uint32_t>(
+ reinterpret_cast<uintptr_t>(callTrampStart) & 0x7FFFFFFFU));
+ target.WriteByte(0x48); // REX.W
+ target.WriteByte(0x63); // MOVSXD r64, r/m32
+ // dest: rax, src: eax
+ target.WriteByte(BuildModRmByte(kModReg, kRegAx, kRegAx));
+ target.WriteByte(0xFF); // JMP /4
+ target.WriteByte(BuildModRmByte(kModReg, 4, kRegAx)); // rax
+
+ return true;
+ }
+#endif // defined(_M_X64)
+
+ void CreateTrampoline(ReadOnlyTargetFunction<MMPolicyT>& origBytes,
+ TrampPoolT* aTrampPool, Trampoline<MMPolicyT>& aTramp,
+ intptr_t aDest, void** aOutTramp) {
+ *aOutTramp = nullptr;
+
+ Trampoline<MMPolicyT>& tramp = aTramp;
+ if (!tramp) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_INVALID_TRAMPOLINE);
+ return;
+ }
+
+ // The beginning of the trampoline contains two pointer-width slots:
+ // [0]: |this|, so that we know whether the trampoline belongs to us;
+ // [1]: Pointer to original function, so that we can reset the hooked
+ // function to its original behavior upon destruction. In rare cases
+ // where the function was already a different trampoline, this is
+ // just a pointer to that trampoline's target address.
+ tramp.WriteEncodedPointer(this);
+ if (!tramp) {
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_WRITE_POINTER_ERROR);
+ return;
+ }
+
+ auto clearInstanceOnFailure = MakeScopeExit([this, aOutTramp, &tramp,
+ &origBytes]() -> void {
+ // *aOutTramp is not set until CreateTrampoline has completed
+ // successfully, so we can use that to check for success.
+ if (*aOutTramp) {
+ return;
+ }
+
+ // Clear the instance pointer so that we don't try to reset a
+ // nonexistent hook.
+ tramp.Rewind();
+ tramp.WriteEncodedPointer(nullptr);
+
+#if defined(NIGHTLY_BUILD)
+ origBytes.Rewind();
+ this->SetLastDetourError(
+ DetourResultCode::DETOUR_PATCHER_CREATE_TRAMPOLINE_ERROR);
+ DetourError& lastError = *this->mVMPolicy.mLastError;
+ size_t bytesToCapture = std::min(
+ ArrayLength(lastError.mOrigBytes),
+ static_cast<size_t>(PrimitiveT::GetWorstCaseRequiredBytesToPatch()));
+# if defined(_M_ARM64)
+ size_t numInstructionsToCapture = bytesToCapture / sizeof(uint32_t);
+ auto origBytesDst = reinterpret_cast<uint32_t*>(lastError.mOrigBytes);
+ for (size_t i = 0; i < numInstructionsToCapture; ++i) {
+ origBytesDst[i] = origBytes.ReadNextInstruction();
+ }
+# else
+ for (size_t i = 0; i < bytesToCapture; ++i) {
+ lastError.mOrigBytes[i] = origBytes[i];
+ }
+# endif // defined(_M_ARM64)
+#else
+ // Silence -Wunused-lambda-capture in non-Nightly.
+ Unused << this;
+ Unused << origBytes;
+#endif // defined(NIGHTLY_BUILD)
+ });
+
+ tramp.WritePointer(origBytes.AsEncodedPtr());
+ if (!tramp) {
+ return;
+ }
+
+ if (PatchIfTargetIsRecognizedTrampoline(tramp, origBytes, aDest,
+ aOutTramp)) {
+ return;
+ }
+
+ tramp.StartExecutableCode();
+
+ constexpr uint32_t kWorstCaseBytesRequired =
+ PrimitiveT::GetWorstCaseRequiredBytesToPatch();
+
+#if defined(_M_IX86)
+ int pJmp32 = -1;
+ while (origBytes.GetOffset() < kWorstCaseBytesRequired) {
+ // Understand some simple instructions that might be found in a
+ // prologue; we might need to extend this as necessary.
+ //
+ // Note! If we ever need to understand jump instructions, we'll
+ // need to rewrite the displacement argument.
+ unsigned char prefixGroups;
+ int numPrefixBytes = CountPrefixBytes(origBytes, &prefixGroups);
+ if (numPrefixBytes < 0 ||
+ (prefixGroups & (ePrefixGroup3 | ePrefixGroup4))) {
+ // Either the prefix sequence was bad, or there are prefixes that
+ // we don't currently support (groups 3 and 4)
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+
+ origBytes += numPrefixBytes;
+ if (*origBytes >= 0x88 && *origBytes <= 0x8B) {
+ // various MOVs
+ ++origBytes;
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized MOV opcode sequence");
+ return;
+ }
+ origBytes += len;
+ } else if (*origBytes == 0x0f &&
+ (origBytes[1] == 0x10 || origBytes[1] == 0x11)) {
+ // SSE: movups xmm, xmm/m128
+ // movups xmm/m128, xmm
+ origBytes += 2;
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized MOV opcode sequence");
+ return;
+ }
+ origBytes += len;
+ } else if (*origBytes == 0xA1) {
+ // MOV eax, [seg:offset]
+ origBytes += 5;
+ } else if (*origBytes == 0xB8) {
+ // MOV 0xB8: http://ref.x86asm.net/coder32.html#xB8
+ origBytes += 5;
+ } else if (*origBytes == 0x33 && (origBytes[1] & kMaskMod) == kModReg) {
+ // XOR r32, r32
+ origBytes += 2;
+ } else if ((*origBytes & 0xf8) == 0x40) {
+ // INC r32
+ origBytes += 1;
+ } else if (*origBytes == 0x83) {
+ uint8_t mod = static_cast<uint8_t>(origBytes[1]) & kMaskMod;
+ uint8_t rm = static_cast<uint8_t>(origBytes[1]) & kMaskRm;
+ if (mod == kModReg) {
+ // ADD|OR|ADC|SBB|AND|SUB|XOR|CMP r, imm8
+ origBytes += 3;
+ } else if (mod == kModDisp8 && rm != kRmNeedSib) {
+ // ADD|OR|ADC|SBB|AND|SUB|XOR|CMP [r+disp8], imm8
+ origBytes += 4;
+ } else {
+ // bail
+ MOZ_ASSERT_UNREACHABLE("Unrecognized bit opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x68) {
+ // PUSH with 4-byte operand
+ origBytes += 5;
+ } else if ((*origBytes & 0xf0) == 0x50) {
+ // 1-byte PUSH/POP
+ ++origBytes;
+ } else if (*origBytes == 0x6A) {
+ // PUSH imm8
+ origBytes += 2;
+ } else if (*origBytes == 0xe9) {
+ pJmp32 = origBytes.GetOffset();
+ // jmp 32bit offset
+ origBytes += 5;
+ } else if (*origBytes == 0xff && origBytes[1] == 0x25) {
+ // jmp [disp32]
+ origBytes += 6;
+ } else if (*origBytes == 0xc2) {
+ // ret imm16. We can't handle this but it happens. We don't ASSERT but
+ // we do fail to hook.
+# if defined(MOZILLA_INTERNAL_API)
+ NS_WARNING("Cannot hook method -- RET opcode found");
+# endif
+ return;
+ } else {
+ // printf ("Unknown x86 instruction byte 0x%02x, aborting trampoline\n",
+ // *origBytes);
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ }
+
+ // The trampoline is a copy of the instructions that we just traced,
+ // followed by a jump that we add below.
+ tramp.CopyFrom(origBytes.GetBaseAddress(), origBytes.GetOffset());
+ if (!tramp) {
+ return;
+ }
+#elif defined(_M_X64)
+ bool foundJmp = false;
+ // |use10BytePatch| should always default to |false| in production. It is
+ // not set to true unless we detect that a 10-byte patch is necessary.
+ // OTOH, for testing purposes, if we want to force a 10-byte patch, we
+ // always initialize |use10BytePatch| to |true|.
+ bool use10BytePatch =
+ (mFlags.value() & DetourFlags::eTestOnlyForceShortPatch) ==
+ DetourFlags::eTestOnlyForceShortPatch;
+ const uint32_t bytesRequired =
+ use10BytePatch ? 10 : kWorstCaseBytesRequired;
+
+ while (origBytes.GetOffset() < bytesRequired) {
+      // If we found a JMP with a 32-bit offset, we require that the next bytes
+ // be NOP or INT3. There is no reason to copy them.
+ // TODO: This used to trigger for Je as well. Now that I allow
+ // instructions after CALL and JE, I don't think I need that.
+ // The only real value of this condition is that if code follows a JMP
+      // then it's _probably_ the target of a JMP somewhere else and we
+ // will be overwriting it, which would be tragic. This seems
+ // highly unlikely.
+ if (foundJmp) {
+ if (*origBytes == 0x90 || *origBytes == 0xcc) {
+ ++origBytes;
+ continue;
+ }
+
+        // If our trampoline space is located in the lowest 2GB, we can do a
+        // ten-byte patch instead of a thirteen-byte patch.
+ if (aTrampPool && aTrampPool->IsInLowest2GB() &&
+ origBytes.GetOffset() >= 10) {
+ use10BytePatch = true;
+ break;
+ }
+
+ MOZ_ASSERT_UNREACHABLE("Opcode sequence includes commands after JMP");
+ return;
+ }
+ if (*origBytes == 0x0f) {
+ COPY_CODES(1);
+ if (*origBytes == 0x1f) {
+ // nop (multibyte)
+ COPY_CODES(1);
+ if ((*origBytes & 0xc0) == 0x40 && (*origBytes & 0x7) == 0x04) {
+ COPY_CODES(3);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x05) {
+ // syscall
+ COPY_CODES(1);
+ } else if (*origBytes == 0x10 || *origBytes == 0x11) {
+ // SSE: movups xmm, xmm/m128
+ // movups xmm/m128, xmm
+ COPY_CODES(1);
+ int nModRmSibBytes = CountModRmSib(origBytes);
+ if (nModRmSibBytes < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ } else {
+ COPY_CODES(nModRmSibBytes);
+ }
+ } else if (*origBytes >= 0x83 && *origBytes <= 0x85) {
+ // 0f 83 cd JAE rel32
+ // 0f 84 cd JE rel32
+ // 0f 85 cd JNE rel32
+ const JumpType kJumpTypes[] = {JumpType::Jae, JumpType::Je,
+ JumpType::Jne};
+ auto jumpType = kJumpTypes[*origBytes - 0x83];
+ ++origBytes;
+ --tramp; // overwrite the 0x0f we copied above
+
+ if (!GenerateJump(tramp, origBytes.ReadDisp32AsAbsolute(),
+ jumpType)) {
+ return;
+ }
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes >= 0x88 && *origBytes <= 0x8B) {
+ // various 32-bit MOVs
+ COPY_CODES(1);
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized MOV opcode sequence");
+ return;
+ }
+ COPY_CODES(len);
+ } else if (*origBytes == 0x40 || *origBytes == 0x41) {
+ // Plain REX or REX.B
+ COPY_CODES(1);
+ if ((*origBytes & 0xf0) == 0x50) {
+ // push/pop with Rx register
+ COPY_CODES(1);
+ } else if (*origBytes >= 0xb8 && *origBytes <= 0xbf) {
+ // mov r32, imm32
+ COPY_CODES(5);
+ } else if (*origBytes == 0x8b && (origBytes[1] & kMaskMod) == kModReg) {
+ // 8B /r: mov r32, r/m32
+ COPY_CODES(2);
+ } else if (*origBytes == 0xf7 &&
+ (origBytes[1] & (kMaskMod | kMaskReg)) ==
+ (kModReg | (0 << kRegFieldShift))) {
+ // F7 /0 id: test r/m32, imm32
+ COPY_CODES(6);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x44) {
+ // REX.R
+ COPY_CODES(1);
+
+ // TODO: Combine with the "0x89" case below in the REX.W section
+ if (*origBytes == 0x89) {
+ // mov r/m32, r32
+ COPY_CODES(1);
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(len);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x45) {
+ // REX.R & REX.B
+ COPY_CODES(1);
+
+ if (*origBytes == 0x33) {
+ // xor r32, r32
+ COPY_CODES(2);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if ((*origBytes & 0xfa) == 0x48) {
+ // REX.W | REX.WR | REX.WRB | REX.WB
+ COPY_CODES(1);
+
+ if (*origBytes == 0x81 && (origBytes[1] & 0xf8) == 0xe8) {
+ // sub r, dword
+ COPY_CODES(6);
+ } else if (*origBytes == 0x83 && (origBytes[1] & 0xf8) == 0xe8) {
+ // sub r, byte
+ COPY_CODES(3);
+ } else if (*origBytes == 0x83 &&
+ (origBytes[1] & (kMaskMod | kMaskReg)) == kModReg) {
+ // add r, byte
+ COPY_CODES(3);
+ } else if (*origBytes == 0x83 && (origBytes[1] & 0xf8) == 0x60) {
+ // and [r+d], imm8
+ COPY_CODES(5);
+ } else if (*origBytes == 0x2b && (origBytes[1] & kMaskMod) == kModReg) {
+ // sub r64, r64
+ COPY_CODES(2);
+ } else if (*origBytes == 0x85) {
+ // 85 /r => TEST r/m32, r32
+ if ((origBytes[1] & 0xc0) == 0xc0) {
+ COPY_CODES(2);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if ((*origBytes & 0xfd) == 0x89) {
+ // MOV r/m64, r64 | MOV r64, r/m64
+ BYTE reg;
+ int len = CountModRmSib(origBytes + 1, &reg);
+ if (len < 0) {
+ MOZ_ASSERT(len == kModOperand64);
+ if (len != kModOperand64) {
+ return;
+ }
+ origBytes += 2; // skip the MOV and MOD R/M bytes
+
+ // The instruction MOVs 64-bit data from a RIP-relative memory
+ // address (determined with a 32-bit offset from RIP) into a
+ // 64-bit register.
+ uintptr_t absAddr = origBytes.ReadDisp32AsAbsolute();
+
+ if (reg == kRegAx) {
+ // Destination is RAX. Encode instruction as MOVABS with a
+ // 64-bit absolute address as its immediate operand.
+ tramp.WriteByte(0xa1);
+ tramp.WritePointer(absAddr);
+ } else {
+ // The MOV must be done in two steps. First, we MOVABS the
+ // absolute 64-bit address into our target register.
+ // Then, we MOV from that address into the register
+ // using register-indirect addressing.
+ tramp.WriteByte(0xb8 + reg);
+ tramp.WritePointer(absAddr);
+ tramp.WriteByte(0x48);
+ tramp.WriteByte(0x8b);
+ tramp.WriteByte(BuildModRmByte(kModNoRegDisp, reg, reg));
+ }
+ } else {
+ COPY_CODES(len + 1);
+ }
+ } else if ((*origBytes & 0xf8) == 0xb8) {
+ // MOV r64, imm64
+ COPY_CODES(9);
+ } else if (*origBytes == 0xc7) {
+ // MOV r/m64, imm32
+ if (origBytes[1] == 0x44) {
+ // MOV [r64+disp8], imm32
+            // ModR/M + SIB + disp8 + imm32
+ COPY_CODES(8);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0xff) {
+ // JMP/4 or CALL/2
+ if ((origBytes[1] & 0xc0) == 0x0 && (origBytes[1] & 0x07) == 0x5 &&
+ ((origBytes[1] & 0x38) == 0x20 ||
+ (origBytes[1] & 0x38) == 0x10)) {
+ origBytes += 2;
+ --tramp; // overwrite the REX.W/REX.RW we copied above
+
+ foundJmp = (origBytes[1] & 0x38) == 0x20;
+ if (!GenerateJump(tramp, origBytes.ChasePointerFromDisp(),
+ foundJmp ? JumpType::Jmp : JumpType::Call)) {
+ return;
+ }
+ } else {
+          // not supported yet!
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x8d) {
+ // LEA reg, addr
+ if ((origBytes[1] & kMaskMod) == 0x0 &&
+ (origBytes[1] & kMaskRm) == 0x5) {
+ // [rip+disp32]
+ // convert 32bit offset to 64bit direct and convert instruction
+ // to a simple 64-bit mov
+ BYTE reg = (origBytes[1] & kMaskReg) >> kRegFieldShift;
+ origBytes += 2;
+ uintptr_t absAddr = origBytes.ReadDisp32AsAbsolute();
+ tramp.WriteByte(0xb8 + reg); // move
+ tramp.WritePointer(absAddr);
+ } else {
+ // Above we dealt with RIP-relative instructions. Any other
+ // operand form can simply be copied.
+ int len = CountModRmSib(origBytes + 1);
+ // We handled the kModOperand64 -- ie RIP-relative -- case above
+ MOZ_ASSERT(len > 0);
+ COPY_CODES(len + 1);
+ }
+ } else if (*origBytes == 0x63 && (origBytes[1] & kMaskMod) == kModReg) {
+ // movsxd r64, r32 (move + sign extend)
+ COPY_CODES(2);
+ } else {
+          // not supported yet!
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x66) {
+ // operand override prefix
+ COPY_CODES(1);
+ // This is the same as the x86 version
+ if (*origBytes >= 0x88 && *origBytes <= 0x8B) {
+ // various MOVs
+ unsigned char b = origBytes[1];
+ if (((b & 0xc0) == 0xc0) ||
+ (((b & 0xc0) == 0x00) && ((b & 0x07) != 0x04) &&
+ ((b & 0x07) != 0x05))) {
+ // REG=r, R/M=r or REG=r, R/M=[r]
+ COPY_CODES(2);
+ } else if ((b & 0xc0) == 0x40) {
+ if ((b & 0x07) == 0x04) {
+ // REG=r, R/M=[SIB + disp8]
+ COPY_CODES(4);
+ } else {
+ // REG=r, R/M=[r + disp8]
+ COPY_CODES(3);
+ }
+ } else {
+ // complex MOV, bail
+ MOZ_ASSERT_UNREACHABLE("Unrecognized MOV opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x44 && origBytes[1] == 0x89) {
+ // mov word ptr [reg+disp8], reg
+ COPY_CODES(2);
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ // no way to support this yet.
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(len);
+ }
+ } else if ((*origBytes & 0xf0) == 0x50) {
+ // 1-byte push/pop
+ COPY_CODES(1);
+ } else if (*origBytes == 0x65) {
+ // GS prefix
+ //
+ // The entry of GetKeyState on Windows 10 has the following code.
+ // 65 48 8b 04 25 30 00 00 00 mov rax,qword ptr gs:[30h]
+ // (GS prefix + REX + MOV (0x8b) ...)
+ if (origBytes[1] == 0x48 &&
+ (origBytes[2] >= 0x88 && origBytes[2] <= 0x8b)) {
+ COPY_CODES(3);
+ int len = CountModRmSib(origBytes);
+ if (len < 0) {
+ // no way to support this yet.
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(len);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x80 && origBytes[1] == 0x3d) {
+ origBytes += 2;
+
+ // cmp byte ptr [rip-relative address], imm8
+ // We'll compute the absolute address and do the cmp in r11
+
+ // push r11 (to save the old value)
+ tramp.WriteByte(0x49);
+ tramp.WriteByte(0x53);
+
+ uintptr_t absAddr = origBytes.ReadDisp32AsAbsolute();
+
+ // mov r11, absolute address
+ tramp.WriteByte(0x49);
+ tramp.WriteByte(0xbb);
+ tramp.WritePointer(absAddr);
+
+ // cmp byte ptr [r11],...
+ tramp.WriteByte(0x41);
+ tramp.WriteByte(0x80);
+ tramp.WriteByte(0x3b);
+
+ // ...imm8
+ COPY_CODES(1);
+
+ // pop r11 (doesn't affect the flags from the cmp)
+ tramp.WriteByte(0x49);
+ tramp.WriteByte(0x5b);
+ } else if (*origBytes == 0x90) {
+ // nop
+ COPY_CODES(1);
+ } else if ((*origBytes & 0xf8) == 0xb8) {
+ // MOV r32, imm32
+ COPY_CODES(5);
+ } else if (*origBytes == 0x33) {
+ // xor r32, r/m32
+ COPY_CODES(2);
+ } else if (*origBytes == 0xf6) {
+ // test r/m8, imm8 (used by ntdll on Windows 10 x64)
+ // (no flags are affected by near jmp since there is no task switch,
+ // so it is ok for a jmp to be written immediately after a test)
+ BYTE subOpcode = 0;
+ int nModRmSibBytes = CountModRmSib(origBytes + 1, &subOpcode);
+ if (nModRmSibBytes < 0 || subOpcode != 0) {
+ // Unsupported
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(2 + nModRmSibBytes);
+ } else if (*origBytes == 0x85) {
+ // test r/m32, r32
+ int nModRmSibBytes = CountModRmSib(origBytes + 1);
+ if (nModRmSibBytes < 0) {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(1 + nModRmSibBytes);
+ } else if (*origBytes == 0xd1 && (origBytes[1] & kMaskMod) == kModReg) {
+ // bit shifts/rotates : (SA|SH|RO|RC)(R|L) r32
+ // (e.g. 0xd1 0xe0 is SAL, 0xd1 0xc8 is ROR)
+ COPY_CODES(2);
+ } else if (*origBytes == 0x83 && (origBytes[1] & kMaskMod) == kModReg) {
+ // ADD|OR|ADC|SBB|AND|SUB|XOR|CMP r, imm8
+ COPY_CODES(3);
+ } else if (*origBytes == 0xc3) {
+ // ret
+ COPY_CODES(1);
+ } else if (*origBytes == 0xcc) {
+ // int 3
+ COPY_CODES(1);
+ } else if (*origBytes == 0xe8 || *origBytes == 0xe9) {
+ // CALL (0xe8) or JMP (0xe9) 32bit offset
+ foundJmp = *origBytes == 0xe9;
+ ++origBytes;
+
+ if (!GenerateJump(tramp, origBytes.ReadDisp32AsAbsolute(),
+ foundJmp ? JumpType::Jmp : JumpType::Call)) {
+ return;
+ }
+ } else if (*origBytes >= 0x73 && *origBytes <= 0x75) {
+ // 73 cb JAE rel8
+ // 74 cb JE rel8
+ // 75 cb JNE rel8
+ const JumpType kJumpTypes[] = {JumpType::Jae, JumpType::Je,
+ JumpType::Jne};
+ auto jumpType = kJumpTypes[*origBytes - 0x73];
+ uint8_t offset = origBytes[1];
+
+ origBytes += 2;
+
+ if (!GenerateJump(tramp, origBytes.OffsetToAbsolute(offset),
+ jumpType)) {
+ return;
+ }
+ } else if (*origBytes == 0xff) {
+ uint8_t mod = origBytes[1] & kMaskMod;
+ uint8_t reg = (origBytes[1] & kMaskReg) >> kRegFieldShift;
+ uint8_t rm = origBytes[1] & kMaskRm;
+ if (mod == kModReg && (reg == 0 || reg == 1 || reg == 2 || reg == 6)) {
+ // INC|DEC|CALL|PUSH r64
+ COPY_CODES(2);
+ } else if (mod == kModNoRegDisp && reg == 2 &&
+ rm == kRmNoRegDispDisp32) {
+ // FF 15 CALL [disp32]
+ origBytes += 2;
+ if (!GenerateJump(tramp, origBytes.ChasePointerFromDisp(),
+ JumpType::Call)) {
+ return;
+ }
+ } else if (reg == 4) {
+ // FF /4 (Opcode=ff, REG=4): JMP r/m
+ if (mod == kModNoRegDisp && rm == kRmNoRegDispDisp32) {
+ // FF 25 JMP [disp32]
+ foundJmp = true;
+
+ origBytes += 2;
+
+ uintptr_t jmpDest = origBytes.ChasePointerFromDisp();
+
+ if (!GenerateJump(tramp, jmpDest, JumpType::Jmp)) {
+ return;
+ }
+ } else {
+ // JMP r/m except JMP [disp32]
+ int len = CountModRmSib(origBytes + 1);
+ if (len < 0) {
+ // RIP-relative not yet supported
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+
+ COPY_CODES(len + 1);
+
+ foundJmp = true;
+ }
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ } else if (*origBytes == 0x83 && (origBytes[1] & 0xf8) == 0x60) {
+ // and [r+d], imm8
+ COPY_CODES(5);
+ } else if (*origBytes == 0xc6) {
+ // mov [r+d], imm8
+ int len = CountModRmSib(origBytes + 1);
+ if (len < 0) {
+ // RIP-relative not yet supported
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ COPY_CODES(len + 2);
+ } else {
+ MOZ_ASSERT_UNREACHABLE("Unrecognized opcode sequence");
+ return;
+ }
+ }
+#elif defined(_M_ARM64)
+
+ // The number of bytes required to facilitate a detour depends on the
+ // proximity of the hook function to the target function. In the best case,
+ // we can branch within +/- 128MB of the current location, requiring only
+ // 4 bytes. In the worst case, we need 16 bytes to load an absolute address
+ // into a register and then branch to it.
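+    // Roughly, the two patch shapes are:
+    //   best case  (4 bytes):  b <hook or veneer>
+    //   worst case (16 bytes): ldr x16, +8 ; br x16 ; <8-byte hook address>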
+ const uint32_t bytesRequiredFromDecode =
+ (mFlags.value() & DetourFlags::eTestOnlyForceShortPatch)
+ ? 4
+ : kWorstCaseBytesRequired;
+
+ while (origBytes.GetOffset() < bytesRequiredFromDecode) {
+ uintptr_t curPC = origBytes.GetCurrentAbsolute();
+ uint32_t curInst = origBytes.ReadNextInstruction();
+
+ Result<arm64::LoadOrBranch, arm64::PCRelCheckError> pcRelInfo =
+ arm64::CheckForPCRel(curPC, curInst);
+ if (pcRelInfo.isErr()) {
+ if (pcRelInfo.unwrapErr() ==
+ arm64::PCRelCheckError::InstructionNotPCRel) {
+ // Instruction is not PC-relative, we can just copy it verbatim
+ tramp.WriteInstruction(curInst);
+ continue;
+ }
+
+ // At this point we have determined that there is no decoder available
+ // for the current, PC-relative, instruction.
+
+ // origBytes is now pointing one instruction past the one that we
+ // need the trampoline to jump back to.
+ if (!origBytes.BackUpOneInstruction()) {
+ return;
+ }
+
+ break;
+ }
+
+ // We need to load an absolute address into a particular register
+ tramp.WriteLoadLiteral(pcRelInfo.inspect().mAbsAddress,
+ pcRelInfo.inspect().mDestReg);
+ }
+
+#else
+# error "Unknown processor type"
+#endif
+
+ if (origBytes.GetOffset() > 100) {
+ // printf ("Too big!");
+ return;
+ }
+
+#if defined(_M_IX86)
+ if (pJmp32 >= 0) {
+ // Jump directly to the original target of the jump instead of jumping to
+ // the original function. Adjust jump target displacement to jump location
+ // in the trampoline.
+ tramp.AdjustDisp32AtOffset(pJmp32 + 1, origBytes.GetBaseAddress());
+ } else {
+ tramp.WriteByte(0xe9); // jmp
+ tramp.WriteDisp32(origBytes.GetAddress());
+ }
+#elif defined(_M_X64)
+ // If we found a Jmp, we don't need to add another instruction. However,
+ // if we found a _conditional_ jump or a CALL (or no control operations
+ // at all) then we still need to run the rest of aOriginalFunction.
+ if (!foundJmp) {
+ if (!GenerateJump(tramp, origBytes.GetAddress(), JumpType::Jmp)) {
+ return;
+ }
+ }
+#elif defined(_M_ARM64)
+ // Let's find out how many bytes we have available to us for patching
+ uint32_t numBytesForPatching = tramp.GetCurrentExecutableCodeLen();
+
+ if (!numBytesForPatching) {
+ // There's nothing we can do
+ return;
+ }
+
+ if (tramp.IsNull()) {
+ // Recursive case
+ HMODULE targetModule = nullptr;
+
+ if (numBytesForPatching < kWorstCaseBytesRequired) {
+ if (!::GetModuleHandleExW(
+ GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
+ GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
+ reinterpret_cast<LPCWSTR>(origBytes.GetBaseAddress()),
+ &targetModule)) {
+ return;
+ }
+ }
+
+ Maybe<TrampPoolT> maybeTrampPool = DoReserve(targetModule);
+ MOZ_ASSERT(maybeTrampPool);
+ if (!maybeTrampPool) {
+ return;
+ }
+
+ Maybe<Trampoline<MMPolicyT>> maybeRealTramp(
+ maybeTrampPool.ref().GetNextTrampoline());
+ if (!maybeRealTramp) {
+ return;
+ }
+
+ origBytes.Rewind();
+ CreateTrampoline(origBytes, maybeTrampPool.ptr(), maybeRealTramp.ref(),
+ aDest, aOutTramp);
+ return;
+ }
+
+ // Write the branch from the trampoline back to the original code
+
+ tramp.WriteLoadLiteral(origBytes.GetAddress(), 16);
+ tramp.WriteInstruction(arm64::BuildUnconditionalBranchToRegister(16));
+#else
+# error "Unsupported processor architecture"
+#endif
+
+ // The trampoline is now complete.
+ void* trampPtr = tramp.EndExecutableCode();
+ if (!trampPtr) {
+ return;
+ }
+
+#ifdef _M_X64
+ if constexpr (MMPolicyT::kSupportsUnwindInfo) {
+ DebugOnly<bool> unwindInfoAdded = tramp.AddUnwindInfo(
+ origBytes.GetBaseAddress(), origBytes.GetOffset());
+ MOZ_ASSERT(unwindInfoAdded);
+ }
+#endif // _M_X64
+
+ WritableTargetFunction<MMPolicyT> target(origBytes.Promote());
+ if (!target) {
+ return;
+ }
+
+ do {
+ // Now patch the original function.
+ // When we're instructed to apply a non-default patch, apply it and exit.
+ // If non-default patching fails, bail out, no fallback.
+ // Otherwise, we go straight to the default patch.
+
+#if defined(_M_X64)
+ if (use10BytePatch) {
+ if (!Apply10BytePatch(aTrampPool, trampPtr, target, aDest)) {
+ return;
+ }
+ break;
+ }
+#elif defined(_M_ARM64)
+ if (numBytesForPatching < kWorstCaseBytesRequired) {
+ if (!Apply4BytePatch(aTrampPool, trampPtr, target, aDest)) {
+ return;
+ }
+ break;
+ }
+#endif
+
+ PrimitiveT::ApplyDefaultPatch(target, aDest);
+ } while (false);
+
+ // Output the trampoline, thus signalling that this call was a success. This
+ // must happen before our patched function can be reached from another
+ // thread, so before we commit the target code (bug 1838286).
+ *aOutTramp = trampPtr;
+
+ if (!target.Commit()) {
+ *aOutTramp = nullptr;
+ }
+ }
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_PatcherDetour_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/PatcherNopSpace.h b/toolkit/xre/dllservices/mozglue/interceptor/PatcherNopSpace.h
new file mode 100644
index 0000000000..deee87e0f8
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/PatcherNopSpace.h
@@ -0,0 +1,205 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_PatcherNopSpace_h
+#define mozilla_interceptor_PatcherNopSpace_h
+
+#if defined(_M_IX86)
+
+# include "mozilla/interceptor/PatcherBase.h"
+
+namespace mozilla {
+namespace interceptor {
+
+template <typename VMPolicy>
+class WindowsDllNopSpacePatcher final : public WindowsDllPatcherBase<VMPolicy> {
+ typedef typename VMPolicy::MMPolicyT MMPolicyT;
+
+ // For remembering the addresses of functions we've patched.
+ mozilla::Vector<void*> mPatchedFns;
+
+ public:
+ template <typename... Args>
+ explicit WindowsDllNopSpacePatcher(Args&&... aArgs)
+ : WindowsDllPatcherBase<VMPolicy>(std::forward<Args>(aArgs)...) {}
+
+ ~WindowsDllNopSpacePatcher() { Clear(); }
+
+ WindowsDllNopSpacePatcher(const WindowsDllNopSpacePatcher&) = delete;
+ WindowsDllNopSpacePatcher(WindowsDllNopSpacePatcher&&) = delete;
+ WindowsDllNopSpacePatcher& operator=(const WindowsDllNopSpacePatcher&) =
+ delete;
+ WindowsDllNopSpacePatcher& operator=(WindowsDllNopSpacePatcher&&) = delete;
+
+ void Clear() {
+ // Restore the mov edi, edi to the beginning of each function we patched.
+
+ for (auto&& ptr : mPatchedFns) {
+ WritableTargetFunction<MMPolicyT> fn(
+ this->mVMPolicy, reinterpret_cast<uintptr_t>(ptr), sizeof(uint16_t));
+ if (!fn) {
+ continue;
+ }
+
+ // mov edi, edi
+ fn.CommitAndWriteShort(0xff8b);
+ }
+
+ mPatchedFns.clear();
+ }
+
+ /**
+ * NVIDIA Optimus drivers utilize Microsoft Detours 2.x to patch functions
+ * in our address space. There is a bug in Detours 2.x that causes it to
+ * patch at the wrong address when attempting to detour code that is already
+ * NOP space patched. This function is an effort to detect the presence of
+ * this NVIDIA code in our address space and disable NOP space patching if it
+ * is. We also check AppInit_DLLs since this is the mechanism that the Optimus
+ * drivers use to inject into our process.
+ */
+ static bool IsCompatible() {
+ // These DLLs are known to have bad interactions with this style of patching
+ const wchar_t* kIncompatibleDLLs[] = {L"detoured.dll", L"_etoured.dll",
+ L"nvd3d9wrap.dll", L"nvdxgiwrap.dll"};
+ // See if the infringing DLLs are already loaded
+ for (unsigned int i = 0; i < mozilla::ArrayLength(kIncompatibleDLLs); ++i) {
+ if (GetModuleHandleW(kIncompatibleDLLs[i])) {
+ return false;
+ }
+ }
+ if (GetModuleHandleW(L"user32.dll")) {
+      // user32 is loaded but the infringing DLLs are not, so assume we're
+      // safe to proceed.
+ return true;
+ }
+ // If user32 has not loaded yet, check AppInit_DLLs to ensure that Optimus
+ // won't be loaded once user32 is initialized.
+ HKEY hkey = NULL;
+ if (!RegOpenKeyExW(
+ HKEY_LOCAL_MACHINE,
+ L"SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Windows", 0,
+ KEY_QUERY_VALUE, &hkey)) {
+ nsAutoRegKey key(hkey);
+ DWORD numBytes = 0;
+ const wchar_t kAppInitDLLs[] = L"AppInit_DLLs";
+ // Query for required buffer size
+ LONG status = RegQueryValueExW(hkey, kAppInitDLLs, nullptr, nullptr,
+ nullptr, &numBytes);
+ mozilla::UniquePtr<wchar_t[]> data;
+ if (!status) {
+ // Allocate the buffer and query for the actual data
+ data = mozilla::MakeUnique<wchar_t[]>((numBytes + 1) / sizeof(wchar_t));
+ status = RegQueryValueExW(hkey, kAppInitDLLs, nullptr, nullptr,
+ (LPBYTE)data.get(), &numBytes);
+ }
+ if (!status) {
+ // For each token, split up the filename components and then check the
+ // name of the file.
+ const wchar_t kDelimiters[] = L", ";
+ wchar_t* tokenContext = nullptr;
+ wchar_t* token = wcstok_s(data.get(), kDelimiters, &tokenContext);
+ while (token) {
+ wchar_t fname[_MAX_FNAME] = {0};
+ if (!_wsplitpath_s(token, nullptr, 0, nullptr, 0, fname,
+ mozilla::ArrayLength(fname), nullptr, 0)) {
+ // nvinit.dll is responsible for bootstrapping the DLL injection, so
+ // that is the library that we check for here
+ const wchar_t kNvInitName[] = L"nvinit";
+ if (!_wcsnicmp(fname, kNvInitName,
+ mozilla::ArrayLength(kNvInitName))) {
+ return false;
+ }
+ }
+ token = wcstok_s(nullptr, kDelimiters, &tokenContext);
+ }
+ }
+ }
+ return true;
+ }
+
+ bool AddHook(FARPROC aTargetFn, intptr_t aHookDest, void** aOrigFunc) {
+ if (!IsCompatible()) {
+# if defined(MOZILLA_INTERNAL_API)
+ NS_WARNING("NOP space patching is unavailable for compatibility reasons");
+# endif
+ return false;
+ }
+
+ MOZ_ASSERT(aTargetFn);
+ if (!aTargetFn) {
+ return false;
+ }
+
+ ReadOnlyTargetFunction<MMPolicyT> readOnlyTargetFn(
+ this->ResolveRedirectedAddress(aTargetFn));
+
+ if (!WriteHook(readOnlyTargetFn, aHookDest, aOrigFunc)) {
+ return false;
+ }
+
+ return mPatchedFns.append(
+ reinterpret_cast<void*>(readOnlyTargetFn.GetBaseAddress()));
+ }
+
+ bool WriteHook(const ReadOnlyTargetFunction<MMPolicyT>& aFn,
+ intptr_t aHookDest, void** aOrigFunc) {
+ // Ensure we can read and write starting at fn - 5 (for the long jmp we're
+ // going to write) and ending at fn + 2 (for the short jmp up to the long
+ // jmp). These bytes may span two pages with different protection.
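+    // The resulting layout is roughly:
+    //   fn-5: e9 <rel32>   jmp aHookDest   (written into the padding)
+    //   fn+0: eb f9        jmp $-5         (replaces "mov edi, edi")
+    // and *aOrigFunc ends up pointing at fn+2, the first unmodified original
+    // instruction.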
+ WritableTargetFunction<MMPolicyT> writableFn(aFn.Promote(7, -5));
+ if (!writableFn) {
+ return false;
+ }
+
+    // Check that the 5 bytes before the function are NOPs or INT 3s,
+ const uint8_t nopOrBp[] = {0x90, 0xCC};
+ if (!writableFn.template VerifyValuesAreOneOf<uint8_t, 5>(nopOrBp)) {
+ return false;
+ }
+
+ // ... and that the first 2 bytes of the function are mov(edi, edi).
+ // There are two ways to encode the same thing:
+ //
+ // 0x89 0xff == mov r/m, r
+ // 0x8b 0xff == mov r, r/m
+ //
+ // where "r" is register and "r/m" is register or memory.
+ // Windows seems to use 0x8B 0xFF. We include 0x89 0xFF out of paranoia.
+
+ // (These look backwards because little-endian)
+ const uint16_t possibleEncodings[] = {0xFF8B, 0xFF89};
+ if (!writableFn.template VerifyValuesAreOneOf<uint16_t, 1>(
+ possibleEncodings, 5)) {
+ return false;
+ }
+
+ // Write a long jump into the space above the function.
+ writableFn.WriteByte(0xe9); // jmp
+ if (!writableFn) {
+ return false;
+ }
+
+ writableFn.WriteDisp32(aHookDest); // target
+ if (!writableFn) {
+ return false;
+ }
+
+ // Set aOrigFunc here, because after this point, aHookDest might be called,
+ // and aHookDest might use the aOrigFunc pointer.
+ *aOrigFunc = reinterpret_cast<void*>(writableFn.GetCurrentAddress() +
+ sizeof(uint16_t));
+
+ // Short jump up into our long jump.
+ return writableFn.CommitAndWriteShort(0xF9EB); // jmp $-5
+ }
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // defined(_M_IX86)
+
+#endif // mozilla_interceptor_PatcherNopSpace_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/RangeMap.h b/toolkit/xre/dllservices/mozglue/interceptor/RangeMap.h
new file mode 100644
index 0000000000..d45d031613
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/RangeMap.h
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_RangeMap_h
+#define mozilla_interceptor_RangeMap_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/mozalloc.h"
+#include "mozilla/Span.h"
+#include "mozilla/UniquePtr.h"
+#include "mozilla/Vector.h"
+
+#include <algorithm>
+
+namespace mozilla {
+namespace interceptor {
+
+/**
+ * This class maintains a vector of VMSharingPolicyUnique objects, sorted on
+ * the memory range that is used for reserving each object.
+ *
+ * This is used by VMSharingPolicyShared for creating and looking up VM regions
+ * that are within proximity of the applicable range.
+ *
+ * VMSharingPolicyUnique objects managed by this class are reused whenever
+ * possible. If no range is required, we just return the first available
+ * policy.
+ *
+ * If no range is required and no policies have yet been allocated, we create
+ * a new one with a null range as a default.
+ */
+template <typename MMPolicyT>
+class RangeMap final {
+ private:
+ /**
+ * This class is used as the comparison key for sorting and insertion.
+ */
+ class Range {
+ public:
+ constexpr Range() : mBase(0), mLimit(0) {}
+
+ explicit Range(const Maybe<Span<const uint8_t>>& aBounds)
+ : mBase(aBounds ? reinterpret_cast<const uintptr_t>(
+ MMPolicyT::GetLowerBound(aBounds.ref()))
+ : 0),
+ mLimit(aBounds ? reinterpret_cast<const uintptr_t>(
+ MMPolicyT::GetUpperBoundIncl(aBounds.ref()))
+ : 0) {}
+
+ Range& operator=(const Range&) = default;
+ Range(const Range&) = default;
+ Range(Range&&) = default;
+ Range& operator=(Range&&) = default;
+
+ bool operator<(const Range& aOther) const {
+ return mBase < aOther.mBase ||
+ (mBase == aOther.mBase && mLimit < aOther.mLimit);
+ }
+
+ bool Contains(const Range& aOther) const {
+ return mBase <= aOther.mBase && mLimit >= aOther.mLimit;
+ }
+
+ private:
+ uintptr_t mBase;
+ uintptr_t mLimit;
+ };
+
+ class PolicyInfo final : public Range {
+ public:
+ explicit PolicyInfo(const Range& aRange)
+ : Range(aRange),
+ mPolicy(MakeUnique<VMSharingPolicyUnique<MMPolicyT>>()) {}
+
+ PolicyInfo(const PolicyInfo&) = delete;
+ PolicyInfo& operator=(const PolicyInfo&) = delete;
+
+ PolicyInfo(PolicyInfo&& aOther) = default;
+ PolicyInfo& operator=(PolicyInfo&& aOther) = default;
+
+ VMSharingPolicyUnique<MMPolicyT>* GetPolicy() { return mPolicy.get(); }
+
+ private:
+ UniquePtr<VMSharingPolicyUnique<MMPolicyT>> mPolicy;
+ };
+
+ using VectorType = Vector<PolicyInfo, 0, InfallibleAllocPolicy>;
+
+ public:
+ constexpr RangeMap() : mPolicies(nullptr) {}
+
+ VMSharingPolicyUnique<MMPolicyT>* GetPolicy(
+ const Maybe<Span<const uint8_t>>& aBounds) {
+ Range testRange(aBounds);
+
+ if (!mPolicies) {
+ mPolicies = new VectorType();
+ }
+
+ // If no bounds are specified, we just use the first available policy
+ if (!aBounds) {
+ if (mPolicies->empty()) {
+ if (!mPolicies->append(PolicyInfo(testRange))) {
+ return nullptr;
+ }
+ }
+
+ return GetFirstPolicy();
+ }
+
+ // mPolicies is sorted, so we search
+ auto itr =
+ std::lower_bound(mPolicies->begin(), mPolicies->end(), testRange);
+ if (itr != mPolicies->end() && itr->Contains(testRange)) {
+ return itr->GetPolicy();
+ }
+
+ itr = mPolicies->insert(itr, PolicyInfo(testRange));
+
+ MOZ_ASSERT(std::is_sorted(mPolicies->begin(), mPolicies->end()));
+
+ return itr->GetPolicy();
+ }
+
+ private:
+ VMSharingPolicyUnique<MMPolicyT>* GetFirstPolicy() {
+ MOZ_RELEASE_ASSERT(mPolicies && !mPolicies->empty());
+ return mPolicies->begin()->GetPolicy();
+ }
+
+ private:
+ VectorType* mPolicies;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_RangeMap_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/TargetFunction.h b/toolkit/xre/dllservices/mozglue/interceptor/TargetFunction.h
new file mode 100644
index 0000000000..b79867edcd
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/TargetFunction.h
@@ -0,0 +1,1000 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_TargetFunction_h
+#define mozilla_interceptor_TargetFunction_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+
+#include "mozilla/Types.h"
+#include "mozilla/Unused.h"
+#include "mozilla/Vector.h"
+
+#include <memory>
+#include <type_traits>
+
+namespace mozilla {
+namespace interceptor {
+
+#if defined(_M_IX86)
+
+template <typename T>
+bool CommitAndWriteShortInternal(const T& aMMPolicy, void* aDest,
+ uint16_t aValue);
+
+template <>
+inline bool CommitAndWriteShortInternal<MMPolicyInProcess>(
+ const MMPolicyInProcess& aMMPolicy, void* aDest, uint16_t aValue) {
+ return aMMPolicy.WriteAtomic(aDest, aValue);
+}
+
+template <>
+inline bool CommitAndWriteShortInternal<MMPolicyOutOfProcess>(
+ const MMPolicyOutOfProcess& aMMPolicy, void* aDest, uint16_t aValue) {
+ return aMMPolicy.Write(aDest, &aValue, sizeof(uint16_t));
+}
+
+#endif // defined(_M_IX86)
+
+// Forward declaration
+template <typename MMPolicy>
+class ReadOnlyTargetFunction;
+
+template <typename MMPolicy>
+class MOZ_STACK_CLASS WritableTargetFunction final {
+ class AutoProtect final {
+ using ProtectParams = std::tuple<uintptr_t, uint32_t>;
+
+ public:
+ explicit AutoProtect(const MMPolicy& aMMPolicy) : mMMPolicy(aMMPolicy) {}
+
+ AutoProtect(const MMPolicy& aMMPolicy, uintptr_t aAddr, size_t aNumBytes,
+ uint32_t aNewProt)
+ : mMMPolicy(aMMPolicy) {
+ const uint32_t pageSize = mMMPolicy.GetPageSize();
+ const uintptr_t limit = aAddr + aNumBytes - 1;
+ const uintptr_t limitPageNum = limit / pageSize;
+ const uintptr_t basePageNum = aAddr / pageSize;
+ const uintptr_t numPagesToChange = limitPageNum - basePageNum + 1;
+
+ // We'll use the base address of the page instead of aAddr
+ uintptr_t curAddr = basePageNum * pageSize;
+
+ // Now change the protection on each page
+ for (uintptr_t curPage = 0; curPage < numPagesToChange;
+ ++curPage, curAddr += pageSize) {
+ uint32_t prevProt;
+ if (!aMMPolicy.Protect(reinterpret_cast<void*>(curAddr), pageSize,
+ aNewProt, &prevProt)) {
+ Clear();
+ return;
+ }
+
+ // Save the previous protection for curAddr so that we can revert this
+ // in the destructor.
+ if (!mProtects.append(std::make_tuple(curAddr, prevProt))) {
+ Clear();
+ return;
+ }
+ }
+ }
+
+ AutoProtect(AutoProtect&& aOther)
+ : mMMPolicy(aOther.mMMPolicy), mProtects(std::move(aOther.mProtects)) {
+ aOther.mProtects.clear();
+ }
+
+ ~AutoProtect() { Clear(); }
+
+ explicit operator bool() const { return !mProtects.empty(); }
+
+ AutoProtect(const AutoProtect&) = delete;
+ AutoProtect& operator=(const AutoProtect&) = delete;
+ AutoProtect& operator=(AutoProtect&&) = delete;
+
+ private:
+ void Clear() {
+ const uint32_t pageSize = mMMPolicy.GetPageSize();
+ for (auto&& entry : mProtects) {
+ uint32_t prevProt;
+ DebugOnly<bool> ok =
+ mMMPolicy.Protect(reinterpret_cast<void*>(std::get<0>(entry)),
+ pageSize, std::get<1>(entry), &prevProt);
+ MOZ_ASSERT(ok);
+ }
+
+ mProtects.clear();
+ }
+
+ private:
+ const MMPolicy& mMMPolicy;
+  // We include two entries of inline storage, as two pages is the most common
+  // worst case.
+ Vector<ProtectParams, 2> mProtects;
+ };
+
+ public:
+ /**
+ * Used to initialize an invalid WritableTargetFunction, thus signalling an
+ * error.
+ */
+ explicit WritableTargetFunction(const MMPolicy& aMMPolicy)
+ : mMMPolicy(aMMPolicy),
+ mFunc(0),
+ mNumBytes(0),
+ mOffset(0),
+ mStartWriteOffset(0),
+ mAccumulatedStatus(false),
+ mProtect(aMMPolicy) {}
+
+ WritableTargetFunction(const MMPolicy& aMMPolicy, uintptr_t aFunc,
+ size_t aNumBytes)
+ : mMMPolicy(aMMPolicy),
+ mFunc(aFunc),
+ mNumBytes(aNumBytes),
+ mOffset(0),
+ mStartWriteOffset(0),
+ mAccumulatedStatus(true),
+ mProtect(aMMPolicy, aFunc, aNumBytes, PAGE_EXECUTE_READWRITE) {}
+
+ WritableTargetFunction(WritableTargetFunction&& aOther)
+ : mMMPolicy(aOther.mMMPolicy),
+ mFunc(aOther.mFunc),
+ mNumBytes(aOther.mNumBytes),
+ mOffset(aOther.mOffset),
+ mStartWriteOffset(aOther.mStartWriteOffset),
+ mLocalBytes(std::move(aOther.mLocalBytes)),
+ mAccumulatedStatus(aOther.mAccumulatedStatus),
+ mProtect(std::move(aOther.mProtect)) {
+ aOther.mAccumulatedStatus = false;
+ }
+
+ ~WritableTargetFunction() {
+ MOZ_ASSERT(mLocalBytes.empty(), "Did you forget to call Commit?");
+ }
+
+ WritableTargetFunction(const WritableTargetFunction&) = delete;
+ WritableTargetFunction& operator=(const WritableTargetFunction&) = delete;
+ WritableTargetFunction& operator=(WritableTargetFunction&&) = delete;
+
+ /**
+ * @return true if data was successfully committed.
+ */
+ bool Commit() {
+ if (!(*this)) {
+ return false;
+ }
+
+ if (mLocalBytes.empty()) {
+ // Nothing to commit, treat like success
+ return true;
+ }
+
+ bool ok =
+ mMMPolicy.Write(reinterpret_cast<void*>(mFunc + mStartWriteOffset),
+ mLocalBytes.begin(), mLocalBytes.length());
+ if (!ok) {
+ return false;
+ }
+
+ mMMPolicy.FlushInstructionCache();
+
+ mStartWriteOffset += mLocalBytes.length();
+
+ mLocalBytes.clear();
+ return true;
+ }
+
+ explicit operator bool() const { return mProtect && mAccumulatedStatus; }
+
+ void WriteByte(const uint8_t& aValue) {
+ if (!mLocalBytes.append(aValue)) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += sizeof(uint8_t);
+ }
+
+ Maybe<uint8_t> ReadByte() {
+ // Reading is only permitted prior to any writing
+ MOZ_ASSERT(mOffset == mStartWriteOffset);
+ if (mOffset > mStartWriteOffset) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ uint8_t value;
+ if (!mMMPolicy.Read(&value, reinterpret_cast<const void*>(mFunc + mOffset),
+ sizeof(uint8_t))) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ mOffset += sizeof(uint8_t);
+ mStartWriteOffset += sizeof(uint8_t);
+ return Some(value);
+ }
+
+ Maybe<uintptr_t> ReadEncodedPtr() {
+ // Reading is only permitted prior to any writing
+ MOZ_ASSERT(mOffset == mStartWriteOffset);
+ if (mOffset > mStartWriteOffset) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ uintptr_t value;
+ if (!mMMPolicy.Read(&value, reinterpret_cast<const void*>(mFunc + mOffset),
+ sizeof(uintptr_t))) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ mOffset += sizeof(uintptr_t);
+ mStartWriteOffset += sizeof(uintptr_t);
+ return Some(ReadOnlyTargetFunction<MMPolicy>::DecodePtr(value));
+ }
+
+ Maybe<uint32_t> ReadLong() {
+ // Reading is only permitted prior to any writing
+ MOZ_ASSERT(mOffset == mStartWriteOffset);
+ if (mOffset > mStartWriteOffset) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ uint32_t value;
+ if (!mMMPolicy.Read(&value, reinterpret_cast<const void*>(mFunc + mOffset),
+ sizeof(uint32_t))) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ mOffset += sizeof(uint32_t);
+ mStartWriteOffset += sizeof(uint32_t);
+ return Some(value);
+ }
+
+ void WriteShort(const uint16_t& aValue) {
+ if (!mLocalBytes.append(reinterpret_cast<const uint8_t*>(&aValue),
+ sizeof(uint16_t))) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += sizeof(uint16_t);
+ }
+
+#if defined(_M_IX86)
+ public:
+ /**
+ * Commits any dirty writes, and then writes a short, atomically if possible.
+ * This call may succeed in both inproc and outproc cases, but atomicity
+ * is only guaranteed in the inproc case.
+ */
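+  // (The canonical example is landing a 2-byte short jump over a
+  // hot-patchable `mov edi, edi` padding instruction; concurrently running
+  // threads must never observe that jump half-written, hence the atomic
+  // in-process path.)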
+ bool CommitAndWriteShort(const uint16_t aValue) {
+ // First, commit everything that has been written until now
+ if (!Commit()) {
+ return false;
+ }
+
+ // Now immediately write the short, atomically if inproc
+ bool ok = CommitAndWriteShortInternal(
+ mMMPolicy, reinterpret_cast<void*>(mFunc + mStartWriteOffset), aValue);
+ if (!ok) {
+ return false;
+ }
+
+ mMMPolicy.FlushInstructionCache();
+ mStartWriteOffset += sizeof(uint16_t);
+ return true;
+ }
+#endif // defined(_M_IX86)
+
+ void WriteDisp32(const uintptr_t aAbsTarget) {
+ intptr_t diff = static_cast<intptr_t>(aAbsTarget) -
+ static_cast<intptr_t>(mFunc + mOffset + sizeof(int32_t));
+
+ CheckedInt<int32_t> checkedDisp(diff);
+ MOZ_ASSERT(checkedDisp.isValid());
+ if (!checkedDisp.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ int32_t disp = checkedDisp.value();
+ if (!mLocalBytes.append(reinterpret_cast<uint8_t*>(&disp),
+ sizeof(int32_t))) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += sizeof(int32_t);
+ }
+
+#if defined(_M_X64) || defined(_M_ARM64)
+ void WriteLong(const uint32_t aValue) {
+ if (!mLocalBytes.append(reinterpret_cast<const uint8_t*>(&aValue),
+ sizeof(uint32_t))) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += sizeof(uint32_t);
+ }
+#endif  // defined(_M_X64) || defined(_M_ARM64)
+
+ void WritePointer(const uintptr_t aAbsTarget) {
+ if (!mLocalBytes.append(reinterpret_cast<const uint8_t*>(&aAbsTarget),
+ sizeof(uintptr_t))) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += sizeof(uintptr_t);
+ }
+
+ /**
+ * @param aValues N-sized array of type T that specifies the set of values
+ * that are permissible in the first M bytes of the target
+ * function at aOffset.
+ * @return true if M values of type T in the function are members of the
+ * set specified by aValues.
+ */
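+  //
+  // Illustrative call (hypothetical values; real callers supply their own
+  // opcode tables):
+  //
+  //   // Accept either NOP (0x90) or INT3 (0xcc) for the first byte:
+  //   const uint8_t allowed[] = {0x90, 0xcc};
+  //   bool ok = writableFn.VerifyValuesAreOneOf<uint8_t, 1>(allowed);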
+ template <typename T, size_t M, size_t N>
+ bool VerifyValuesAreOneOf(const T (&aValues)[N], const uint8_t aOffset = 0) {
+ T buf[M];
+ if (!mMMPolicy.Read(
+ buf, reinterpret_cast<const void*>(mFunc + mOffset + aOffset),
+ M * sizeof(T))) {
+ return false;
+ }
+
+ for (auto&& fnValue : buf) {
+ bool match = false;
+ for (auto&& testValue : aValues) {
+ match |= (fnValue == testValue);
+ }
+
+ if (!match) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ uintptr_t GetCurrentAddress() const { return mFunc + mOffset; }
+
+ private:
+ const MMPolicy& mMMPolicy;
+ const uintptr_t mFunc;
+ const size_t mNumBytes;
+ uint32_t mOffset;
+ uint32_t mStartWriteOffset;
+
+ // In an ideal world, we'd only read 5 bytes on 32-bit and 13 bytes on 64-bit,
+  // to match the minimum number of bytes we must write in order to patch the
+ // target function. Since the actual opcodes will often require us to pull in
+ // extra bytes above that minimum, we set the inline storage to be larger than
+ // those minima in an effort to give the Vector extra wiggle room before it
+ // needs to touch the heap.
+#if defined(_M_IX86)
+ static const size_t kInlineStorage = 16;
+#elif defined(_M_X64) || defined(_M_ARM64)
+ static const size_t kInlineStorage = 32;
+#endif
+ Vector<uint8_t, kInlineStorage> mLocalBytes;
+ bool mAccumulatedStatus;
+ AutoProtect mProtect;
+};
+
+template <typename MMPolicy>
+class ReadOnlyTargetBytes {
+ public:
+ ReadOnlyTargetBytes(const MMPolicy& aMMPolicy, const void* aBase)
+ : mMMPolicy(aMMPolicy), mBase(reinterpret_cast<const uint8_t*>(aBase)) {}
+
+ ReadOnlyTargetBytes(ReadOnlyTargetBytes&& aOther)
+ : mMMPolicy(aOther.mMMPolicy), mBase(aOther.mBase) {}
+
+ ReadOnlyTargetBytes(const ReadOnlyTargetBytes& aOther,
+ const uint32_t aOffsetFromOther = 0)
+ : mMMPolicy(aOther.mMMPolicy), mBase(aOther.mBase + aOffsetFromOther) {}
+
+ void EnsureLimit(uint32_t aDesiredLimit) {
+ // In the out-proc case we use this function to read the target function's
+ // bytes in the other process into a local buffer. We don't need that for
+ // the in-process case because we already have direct access to our target
+ // function's bytes.
+ }
+
+ uint32_t TryEnsureLimit(uint32_t aDesiredLimit) {
+ // Same as EnsureLimit above. We don't need to ensure for the in-process.
+ return aDesiredLimit;
+ }
+
+ bool IsValidAtOffset(const int8_t aOffset) const {
+ if (!aOffset) {
+ return true;
+ }
+
+ uintptr_t base = reinterpret_cast<uintptr_t>(mBase);
+ uintptr_t adjusted = base + aOffset;
+ uint32_t pageSize = mMMPolicy.GetPageSize();
+
+ // If |adjusted| is within the same page as |mBase|, we're still valid
+ if ((base / pageSize) == (adjusted / pageSize)) {
+ return true;
+ }
+
+ // Otherwise, let's query |adjusted|
+ return mMMPolicy.IsPageAccessible(adjusted);
+ }
+
+ /**
+ * This returns a pointer to a *potentially local copy* of the target
+ * function's bytes. The returned pointer should not be used for any
+ * pointer arithmetic relating to the target function.
+ */
+ const uint8_t* GetLocalBytes() const { return mBase; }
+
+ /**
+ * This returns a pointer to the target function's bytes. The returned pointer
+   * may belong to another process, so while it should be used for
+ * pointer arithmetic, it *must not* be dereferenced.
+ */
+ uintptr_t GetBase() const { return reinterpret_cast<uintptr_t>(mBase); }
+
+ const MMPolicy& GetMMPolicy() const { return mMMPolicy; }
+
+ ReadOnlyTargetBytes& operator=(const ReadOnlyTargetBytes&) = delete;
+ ReadOnlyTargetBytes& operator=(ReadOnlyTargetBytes&&) = delete;
+
+ private:
+ const MMPolicy& mMMPolicy;
+ uint8_t const* const mBase;
+};
+
+template <>
+class ReadOnlyTargetBytes<MMPolicyOutOfProcess> {
+ public:
+ ReadOnlyTargetBytes(const MMPolicyOutOfProcess& aMMPolicy, const void* aBase)
+ : mMMPolicy(aMMPolicy), mBase(reinterpret_cast<const uint8_t*>(aBase)) {}
+
+ ReadOnlyTargetBytes(ReadOnlyTargetBytes&& aOther)
+ : mMMPolicy(aOther.mMMPolicy),
+ mLocalBytes(std::move(aOther.mLocalBytes)),
+ mBase(aOther.mBase) {}
+
+ ReadOnlyTargetBytes(const ReadOnlyTargetBytes& aOther)
+ : mMMPolicy(aOther.mMMPolicy), mBase(aOther.mBase) {
+ Unused << mLocalBytes.appendAll(aOther.mLocalBytes);
+ }
+
+ ReadOnlyTargetBytes(const ReadOnlyTargetBytes& aOther,
+ const uint32_t aOffsetFromOther)
+ : mMMPolicy(aOther.mMMPolicy), mBase(aOther.mBase + aOffsetFromOther) {
+ if (aOffsetFromOther >= aOther.mLocalBytes.length()) {
+ return;
+ }
+
+ Unused << mLocalBytes.append(aOther.mLocalBytes.begin() + aOffsetFromOther,
+ aOther.mLocalBytes.end());
+ }
+
+ void EnsureLimit(uint32_t aDesiredLimit) {
+ size_t prevSize = mLocalBytes.length();
+ if (aDesiredLimit < prevSize) {
+ return;
+ }
+
+ size_t newSize = aDesiredLimit + 1;
+ if (newSize < kInlineStorage) {
+ // Always try to read as much memory as we can at once
+ newSize = kInlineStorage;
+ }
+
+ bool resizeOk = mLocalBytes.resize(newSize);
+ MOZ_RELEASE_ASSERT(resizeOk);
+
+ bool ok = mMMPolicy.Read(&mLocalBytes[prevSize], mBase + prevSize,
+ newSize - prevSize);
+ if (ok) {
+ return;
+ }
+
+    // We couldn't read the full padded amount (which may happen if the extra
+    // bytes are not accessible). In this case, we fall back to reading just
+    // the bare minimum.
+ newSize = aDesiredLimit + 1;
+ resizeOk = mLocalBytes.resize(newSize);
+ MOZ_RELEASE_ASSERT(resizeOk);
+
+ ok = mMMPolicy.Read(&mLocalBytes[prevSize], mBase + prevSize,
+ newSize - prevSize);
+ MOZ_RELEASE_ASSERT(ok);
+ }
+
+ // This function tries to ensure as many bytes as possible up to
+ // |aDesiredLimit| bytes, returning how many bytes were actually ensured.
+  // As EnsureLimit does, we allocate an extra byte locally to make sure
+  // mLocalBytes always has at least one byte even if the target memory is
+  // entirely inaccessible.
+ uint32_t TryEnsureLimit(uint32_t aDesiredLimit) {
+ size_t prevSize = mLocalBytes.length();
+ if (aDesiredLimit < prevSize) {
+ return aDesiredLimit;
+ }
+
+ size_t newSize = aDesiredLimit;
+ if (newSize < kInlineStorage) {
+ // Always try to read as much memory as we can at once
+ newSize = kInlineStorage;
+ }
+
+ bool resizeOk = mLocalBytes.resize(newSize);
+ MOZ_RELEASE_ASSERT(resizeOk);
+
+ size_t bytesRead = mMMPolicy.TryRead(&mLocalBytes[prevSize],
+ mBase + prevSize, newSize - prevSize);
+
+ newSize = prevSize + bytesRead;
+
+ resizeOk = mLocalBytes.resize(newSize + 1);
+ MOZ_RELEASE_ASSERT(resizeOk);
+
+ mLocalBytes[newSize] = 0;
+ return newSize;
+ }
+
+ bool IsValidAtOffset(const int8_t aOffset) const {
+ if (!aOffset) {
+ return true;
+ }
+
+ uintptr_t base = reinterpret_cast<uintptr_t>(mBase);
+ uintptr_t adjusted = base + aOffset;
+ uint32_t pageSize = mMMPolicy.GetPageSize();
+
+ // If |adjusted| is within the same page as |mBase|, we're still valid
+ if ((base / pageSize) == (adjusted / pageSize)) {
+ return true;
+ }
+
+ // Otherwise, let's query |adjusted|
+ return mMMPolicy.IsPageAccessible(adjusted);
+ }
+
+ /**
+ * This returns a pointer to a *potentially local copy* of the target
+ * function's bytes. The returned pointer should not be used for any
+ * pointer arithmetic relating to the target function.
+ */
+ const uint8_t* GetLocalBytes() const {
+ if (mLocalBytes.empty()) {
+ return nullptr;
+ }
+
+ return mLocalBytes.begin();
+ }
+
+ /**
+ * This returns a pointer to the target function's bytes. The returned pointer
+   * may belong to another process, so while it should be used for
+ * pointer arithmetic, it *must not* be dereferenced.
+ */
+ uintptr_t GetBase() const { return reinterpret_cast<uintptr_t>(mBase); }
+
+ const MMPolicyOutOfProcess& GetMMPolicy() const { return mMMPolicy; }
+
+ ReadOnlyTargetBytes& operator=(const ReadOnlyTargetBytes&) = delete;
+ ReadOnlyTargetBytes& operator=(ReadOnlyTargetBytes&&) = delete;
+
+ private:
+ // In an ideal world, we'd only read 5 bytes on 32-bit and 13 bytes on 64-bit,
+  // to match the minimum number of bytes we must write in order to patch the
+ // target function. Since the actual opcodes will often require us to pull in
+ // extra bytes above that minimum, we set the inline storage to be larger than
+ // those minima in an effort to give the Vector extra wiggle room before it
+ // needs to touch the heap.
+#if defined(_M_IX86)
+ static const size_t kInlineStorage = 16;
+#elif defined(_M_X64) || defined(_M_ARM64)
+ static const size_t kInlineStorage = 32;
+#endif
+
+ const MMPolicyOutOfProcess& mMMPolicy;
+ Vector<uint8_t, kInlineStorage> mLocalBytes;
+ uint8_t const* const mBase;
+};
+
+template <typename MMPolicy>
+class TargetBytesPtr {
+ public:
+ typedef TargetBytesPtr<MMPolicy> Type;
+
+ static Type Make(const MMPolicy& aMMPolicy, const void* aFunc) {
+ return TargetBytesPtr(aMMPolicy, aFunc);
+ }
+
+ static Type CopyFromOffset(const TargetBytesPtr& aOther,
+ const uint32_t aOffsetFromOther) {
+ return TargetBytesPtr(aOther, aOffsetFromOther);
+ }
+
+ ReadOnlyTargetBytes<MMPolicy>* operator->() { return &mTargetBytes; }
+
+ TargetBytesPtr(TargetBytesPtr&& aOther)
+ : mTargetBytes(std::move(aOther.mTargetBytes)) {}
+
+ TargetBytesPtr(const TargetBytesPtr& aOther)
+ : mTargetBytes(aOther.mTargetBytes) {}
+
+ TargetBytesPtr& operator=(const TargetBytesPtr&) = delete;
+ TargetBytesPtr& operator=(TargetBytesPtr&&) = delete;
+
+ private:
+ TargetBytesPtr(const MMPolicy& aMMPolicy, const void* aFunc)
+ : mTargetBytes(aMMPolicy, aFunc) {}
+
+ TargetBytesPtr(const TargetBytesPtr& aOther, const uint32_t aOffsetFromOther)
+ : mTargetBytes(aOther.mTargetBytes, aOffsetFromOther) {}
+
+ ReadOnlyTargetBytes<MMPolicy> mTargetBytes;
+};
+
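+// The out-of-process specialization below hands out a std::shared_ptr rather
+// than a by-value wrapper: ReadOnlyTargetBytes<MMPolicyOutOfProcess> carries
+// a local buffer of bytes read from the other process, so copies of
+// ReadOnlyTargetFunction share that buffer instead of duplicating it.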
+template <>
+class TargetBytesPtr<MMPolicyOutOfProcess> {
+ public:
+ typedef std::shared_ptr<ReadOnlyTargetBytes<MMPolicyOutOfProcess>> Type;
+
+ static Type Make(const MMPolicyOutOfProcess& aMMPolicy, const void* aFunc) {
+ return std::make_shared<ReadOnlyTargetBytes<MMPolicyOutOfProcess>>(
+ aMMPolicy, aFunc);
+ }
+
+ static Type CopyFromOffset(const Type& aOther,
+ const uint32_t aOffsetFromOther) {
+ return std::make_shared<ReadOnlyTargetBytes<MMPolicyOutOfProcess>>(
+ *aOther, aOffsetFromOther);
+ }
+};
+
+template <typename MMPolicy>
+class MOZ_STACK_CLASS ReadOnlyTargetFunction final {
+ public:
+ ReadOnlyTargetFunction(const MMPolicy& aMMPolicy, const void* aFunc)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(aMMPolicy, aFunc)),
+ mOffset(0) {}
+
+ ReadOnlyTargetFunction(const MMPolicy& aMMPolicy, FARPROC aFunc)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(
+ aMMPolicy, reinterpret_cast<const void*>(aFunc))),
+ mOffset(0) {}
+
+ ReadOnlyTargetFunction(const MMPolicy& aMMPolicy, uintptr_t aFunc)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(
+ aMMPolicy, reinterpret_cast<const void*>(aFunc))),
+ mOffset(0) {}
+
+ ReadOnlyTargetFunction(ReadOnlyTargetFunction&& aOther)
+ : mTargetBytes(std::move(aOther.mTargetBytes)), mOffset(aOther.mOffset) {}
+
+ ReadOnlyTargetFunction& operator=(const ReadOnlyTargetFunction&) = delete;
+ ReadOnlyTargetFunction& operator=(ReadOnlyTargetFunction&&) = delete;
+
+ ~ReadOnlyTargetFunction() = default;
+
+ ReadOnlyTargetFunction operator+(const uint32_t aOffset) const {
+ return ReadOnlyTargetFunction(*this, mOffset + aOffset);
+ }
+
+ uintptr_t GetBaseAddress() const { return mTargetBytes->GetBase(); }
+
+ uintptr_t GetAddress() const { return mTargetBytes->GetBase() + mOffset; }
+
+ uintptr_t AsEncodedPtr() const {
+ return EncodePtr(
+ reinterpret_cast<void*>(mTargetBytes->GetBase() + mOffset));
+ }
+
+ static uintptr_t EncodePtr(void* aPtr) {
+ return reinterpret_cast<uintptr_t>(::EncodePointer(aPtr));
+ }
+
+ static uintptr_t DecodePtr(uintptr_t aEncodedPtr) {
+ return reinterpret_cast<uintptr_t>(
+ ::DecodePointer(reinterpret_cast<PVOID>(aEncodedPtr)));
+ }
+
+ bool IsValidAtOffset(const int8_t aOffset) const {
+ return mTargetBytes->IsValidAtOffset(aOffset);
+ }
+
+#if defined(_M_ARM64)
+
+ uint32_t ReadNextInstruction() {
+ mTargetBytes->EnsureLimit(mOffset + sizeof(uint32_t));
+ uint32_t instruction = *reinterpret_cast<const uint32_t*>(
+ mTargetBytes->GetLocalBytes() + mOffset);
+ mOffset += sizeof(uint32_t);
+ return instruction;
+ }
+
+ bool BackUpOneInstruction() {
+ if (mOffset < sizeof(uint32_t)) {
+ return false;
+ }
+
+ mOffset -= sizeof(uint32_t);
+ return true;
+ }
+
+#else
+
+ uint8_t const& operator*() const {
+ mTargetBytes->EnsureLimit(mOffset);
+ return *(mTargetBytes->GetLocalBytes() + mOffset);
+ }
+
+ uint8_t const& operator[](uint32_t aIndex) const {
+ mTargetBytes->EnsureLimit(mOffset + aIndex);
+ return *(mTargetBytes->GetLocalBytes() + mOffset + aIndex);
+ }
+
+ ReadOnlyTargetFunction& operator++() {
+ ++mOffset;
+ return *this;
+ }
+
+ ReadOnlyTargetFunction& operator+=(uint32_t aDelta) {
+ mOffset += aDelta;
+ return *this;
+ }
+
+ uintptr_t ReadDisp32AsAbsolute() {
+ mTargetBytes->EnsureLimit(mOffset + sizeof(int32_t));
+ int32_t disp = *reinterpret_cast<const int32_t*>(
+ mTargetBytes->GetLocalBytes() + mOffset);
+ uintptr_t result =
+ mTargetBytes->GetBase() + mOffset + sizeof(int32_t) + disp;
+ mOffset += sizeof(int32_t);
+ return result;
+ }
+
+ bool IsRelativeShortJump(uintptr_t* aOutTarget) {
+ if ((*this)[0] == 0xeb) {
+ int8_t offset = static_cast<int8_t>((*this)[1]);
+ *aOutTarget = GetAddress() + 2 + offset;
+ return true;
+ }
+ return false;
+ }
+
+# if defined(_M_X64)
+ // Currently this function is used only in x64.
+ bool IsRelativeNearJump(uintptr_t* aOutTarget) {
+ if ((*this)[0] == 0xe9) {
+ *aOutTarget = (*this + 1).ReadDisp32AsAbsolute();
+ return true;
+ }
+ return false;
+ }
+# endif // defined(_M_X64)
+
+ bool IsIndirectNearJump(uintptr_t* aOutTarget) {
+ if ((*this)[0] == 0xff && (*this)[1] == 0x25) {
+# if defined(_M_X64)
+ *aOutTarget = (*this + 2).ChasePointerFromDisp();
+# else
+ *aOutTarget = (*this + 2).template ChasePointer<uintptr_t*>();
+# endif // defined(_M_X64)
+ return true;
+ }
+# if defined(_M_X64)
+ else if ((*this)[0] == 0x48 && (*this)[1] == 0xff && (*this)[2] == 0x25) {
+      // According to the Intel SDM, JMP does not take REX.W except for
+      // JMP m16:64, but the CPU can execute JMP r/m32 with REX.W. We handle
+      // it just in case.
+ *aOutTarget = (*this + 3).ChasePointerFromDisp();
+ return true;
+ }
+# endif // defined(_M_X64)
+ return false;
+ }
+
+#endif // defined(_M_ARM64)
+
+ void Rewind() { mOffset = 0; }
+
+ uint32_t GetOffset() const { return mOffset; }
+
+ uintptr_t OffsetToAbsolute(const uint8_t aOffset) const {
+ return mTargetBytes->GetBase() + mOffset + aOffset;
+ }
+
+ uintptr_t GetCurrentAbsolute() const { return OffsetToAbsolute(0); }
+
+ /**
+ * This method promotes the code referenced by this object to be writable.
+ *
+ * @param aLen The length of the function's code to make writable. If set
+ * to zero, this object's current offset is used as the length.
+ * @param aOffset The result's base address will be offset from this
+ * object's base address by |aOffset| bytes. This value may be
+ * negative.
+ */
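+  //
+  // Illustrative flow (a hypothetical sketch; the real callers are the
+  // patcher classes):
+  //
+  //   ReadOnlyTargetFunction<MMPolicy> ro(policy, funcAddr);
+  //   ro += 5;                       // decoded 5 bytes of prologue (x86)
+  //   auto writable = ro.Promote();  // promote those 5 bytes to writable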
+ WritableTargetFunction<MMPolicy> Promote(const uint32_t aLen = 0,
+ const int8_t aOffset = 0) const {
+ const uint32_t effectiveLength = aLen ? aLen : mOffset;
+ MOZ_RELEASE_ASSERT(effectiveLength,
+ "Cannot Promote a zero-length function");
+
+ if (!mTargetBytes->IsValidAtOffset(aOffset)) {
+ return WritableTargetFunction<MMPolicy>(mTargetBytes->GetMMPolicy());
+ }
+
+ WritableTargetFunction<MMPolicy> result(mTargetBytes->GetMMPolicy(),
+ mTargetBytes->GetBase() + aOffset,
+ effectiveLength);
+
+ return result;
+ }
+
+ private:
+ template <typename T>
+ struct ChasePointerHelper {
+ template <typename MMPolicy_>
+ static T Result(const MMPolicy_&, T aValue) {
+ return aValue;
+ }
+ };
+
+ template <typename T>
+ struct ChasePointerHelper<T*> {
+ template <typename MMPolicy_>
+ static auto Result(const MMPolicy_& aPolicy, T* aValue) {
+ ReadOnlyTargetFunction<MMPolicy_> ptr(aPolicy, aValue);
+ return ptr.template ChasePointer<T>();
+ }
+ };
+
+ public:
+ // Keep chasing pointers until T is not a pointer type anymore
+ template <typename T>
+ auto ChasePointer() {
+ mTargetBytes->EnsureLimit(mOffset + sizeof(T));
+ const std::remove_cv_t<T> result =
+ *reinterpret_cast<const std::remove_cv_t<T>*>(
+ mTargetBytes->GetLocalBytes() + mOffset);
+ return ChasePointerHelper<std::remove_cv_t<T>>::Result(
+ mTargetBytes->GetMMPolicy(), result);
+ }
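+  // For example, ChasePointer<uintptr_t*>() reads a pointer-sized value at
+  // the current offset and then reads the uintptr_t that it points to (via
+  // the MMPolicy, so this also works out-of-process), whereas
+  // ChasePointer<uintptr_t>() stops after the first read.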
+
+ uintptr_t ChasePointerFromDisp() {
+ uintptr_t ptrFromDisp = ReadDisp32AsAbsolute();
+ ReadOnlyTargetFunction<MMPolicy> ptr(
+ mTargetBytes->GetMMPolicy(),
+ reinterpret_cast<const void*>(ptrFromDisp));
+ return ptr.template ChasePointer<uintptr_t>();
+ }
+
+ private:
+ ReadOnlyTargetFunction(const ReadOnlyTargetFunction& aOther)
+ : mTargetBytes(aOther.mTargetBytes), mOffset(aOther.mOffset) {}
+
+ ReadOnlyTargetFunction(const ReadOnlyTargetFunction& aOther,
+ const uint32_t aOffsetFromOther)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::CopyFromOffset(
+ aOther.mTargetBytes, aOffsetFromOther)),
+ mOffset(0) {}
+
+ private:
+ mutable typename TargetBytesPtr<MMPolicy>::Type mTargetBytes;
+ uint32_t mOffset;
+};
+
+template <typename MMPolicy, typename T>
+class MOZ_STACK_CLASS TargetObject {
+ mutable typename TargetBytesPtr<MMPolicy>::Type mTargetBytes;
+
+ TargetObject(const MMPolicy& aMMPolicy, const void* aBaseAddress)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(aMMPolicy, aBaseAddress)) {
+ mTargetBytes->EnsureLimit(sizeof(T));
+ }
+
+ public:
+ explicit TargetObject(const MMPolicy& aMMPolicy)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(aMMPolicy, nullptr)) {}
+
+ TargetObject(const MMPolicy& aMMPolicy, uintptr_t aBaseAddress)
+ : TargetObject(aMMPolicy, reinterpret_cast<const void*>(aBaseAddress)) {}
+
+ TargetObject(const TargetObject&) = delete;
+ TargetObject(TargetObject&&) = delete;
+ TargetObject& operator=(const TargetObject&) = delete;
+ TargetObject& operator=(TargetObject&&) = delete;
+
+ explicit operator bool() const {
+ return mTargetBytes->GetBase() && mTargetBytes->GetLocalBytes();
+ }
+
+ const T* operator->() const {
+ return reinterpret_cast<const T*>(mTargetBytes->GetLocalBytes());
+ }
+
+ const T* GetLocalBase() const {
+ return reinterpret_cast<const T*>(mTargetBytes->GetLocalBytes());
+ }
+};
+
+template <typename MMPolicy, typename T>
+class MOZ_STACK_CLASS TargetObjectArray {
+ mutable typename TargetBytesPtr<MMPolicy>::Type mTargetBytes;
+ size_t mNumOfItems;
+
+ TargetObjectArray(const MMPolicy& aMMPolicy, const void* aBaseAddress,
+ size_t aNumOfItems)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(aMMPolicy, aBaseAddress)),
+ mNumOfItems(aNumOfItems) {
+ uint32_t itemsRead =
+ mTargetBytes->TryEnsureLimit(sizeof(T) * mNumOfItems) / sizeof(T);
+    // itemsRead may be bigger than the requested amount because of buffering,
+    // but mNumOfItems must not be inflated by that extra buffering.
+ if (itemsRead < mNumOfItems) {
+ mNumOfItems = itemsRead;
+ }
+ }
+
+ const T* GetLocalBase() const {
+ return reinterpret_cast<const T*>(mTargetBytes->GetLocalBytes());
+ }
+
+ public:
+ explicit TargetObjectArray(const MMPolicy& aMMPolicy)
+ : mTargetBytes(TargetBytesPtr<MMPolicy>::Make(aMMPolicy, nullptr)),
+ mNumOfItems(0) {}
+
+ TargetObjectArray(const MMPolicy& aMMPolicy, uintptr_t aBaseAddress,
+ size_t aNumOfItems)
+ : TargetObjectArray(aMMPolicy,
+ reinterpret_cast<const void*>(aBaseAddress),
+ aNumOfItems) {}
+
+ TargetObjectArray(const TargetObjectArray&) = delete;
+ TargetObjectArray(TargetObjectArray&&) = delete;
+ TargetObjectArray& operator=(const TargetObjectArray&) = delete;
+ TargetObjectArray& operator=(TargetObjectArray&&) = delete;
+
+ explicit operator bool() const {
+ return mTargetBytes->GetBase() && mNumOfItems;
+ }
+
+ const T* operator[](size_t aIndex) const {
+ if (aIndex >= mNumOfItems) {
+ return nullptr;
+ }
+
+ return &GetLocalBase()[aIndex];
+ }
+
+ template <typename Comparator>
+ bool BinarySearchIf(const Comparator& aCompare,
+ size_t* aMatchOrInsertionPoint) const {
+ return mozilla::BinarySearchIf(GetLocalBase(), 0, mNumOfItems, aCompare,
+ aMatchOrInsertionPoint);
+ }
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_TargetFunction_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/Trampoline.h b/toolkit/xre/dllservices/mozglue/interceptor/Trampoline.h
new file mode 100644
index 0000000000..befbd47215
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/Trampoline.h
@@ -0,0 +1,773 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_Trampoline_h
+#define mozilla_interceptor_Trampoline_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Types.h"
+#include "mozilla/WindowsProcessMitigations.h"
+#include "mozilla/WindowsUnwindInfo.h"
+
+namespace mozilla {
+namespace interceptor {
+
+template <typename MMPolicy>
+class MOZ_STACK_CLASS Trampoline final {
+ public:
+ Trampoline(const MMPolicy* aMMPolicy, uint8_t* const aLocalBase,
+ const uintptr_t aRemoteBase, const uint32_t aChunkSize)
+ : mMMPolicy(aMMPolicy),
+ mPrevLocalProt(0),
+ mLocalBase(aLocalBase),
+ mRemoteBase(aRemoteBase),
+ mOffset(0),
+ mExeOffset(0),
+#ifdef _M_X64
+ mCopyCodesEndOffset(0),
+ mExeEndOffset(0),
+#endif // _M_X64
+ mMaxOffset(aChunkSize),
+ mAccumulatedStatus(true) {
+ if (!::VirtualProtect(aLocalBase, aChunkSize,
+ MMPolicy::GetTrampWriteProtFlags(),
+ &mPrevLocalProt)) {
+ mPrevLocalProt = 0;
+ }
+ }
+
+ Trampoline(Trampoline&& aOther)
+ : mMMPolicy(aOther.mMMPolicy),
+ mPrevLocalProt(aOther.mPrevLocalProt),
+ mLocalBase(aOther.mLocalBase),
+ mRemoteBase(aOther.mRemoteBase),
+ mOffset(aOther.mOffset),
+ mExeOffset(aOther.mExeOffset),
+#ifdef _M_X64
+ mCopyCodesEndOffset(aOther.mCopyCodesEndOffset),
+ mExeEndOffset(aOther.mExeEndOffset),
+#endif // _M_X64
+ mMaxOffset(aOther.mMaxOffset),
+ mAccumulatedStatus(aOther.mAccumulatedStatus) {
+ aOther.mPrevLocalProt = 0;
+ aOther.mAccumulatedStatus = false;
+ }
+
+ MOZ_IMPLICIT Trampoline(decltype(nullptr))
+ : mMMPolicy(nullptr),
+ mPrevLocalProt(0),
+ mLocalBase(nullptr),
+ mRemoteBase(0),
+ mOffset(0),
+ mExeOffset(0),
+#ifdef _M_X64
+ mCopyCodesEndOffset(0),
+ mExeEndOffset(0),
+#endif // _M_X64
+ mMaxOffset(0),
+ mAccumulatedStatus(false) {
+ }
+
+ Trampoline(const Trampoline&) = delete;
+ Trampoline& operator=(const Trampoline&) = delete;
+
+ Trampoline& operator=(Trampoline&& aOther) {
+ Clear();
+
+ mMMPolicy = aOther.mMMPolicy;
+ mPrevLocalProt = aOther.mPrevLocalProt;
+ mLocalBase = aOther.mLocalBase;
+ mRemoteBase = aOther.mRemoteBase;
+ mOffset = aOther.mOffset;
+ mExeOffset = aOther.mExeOffset;
+#ifdef _M_X64
+ mCopyCodesEndOffset = aOther.mCopyCodesEndOffset;
+ mExeEndOffset = aOther.mExeEndOffset;
+#endif // _M_X64
+ mMaxOffset = aOther.mMaxOffset;
+ mAccumulatedStatus = aOther.mAccumulatedStatus;
+
+ aOther.mPrevLocalProt = 0;
+ aOther.mAccumulatedStatus = false;
+
+ return *this;
+ }
+
+ ~Trampoline() { Clear(); }
+
+ explicit operator bool() const {
+ return IsNull() ||
+ (mLocalBase && mRemoteBase && mPrevLocalProt && mAccumulatedStatus);
+ }
+
+ bool IsNull() const { return !mMMPolicy; }
+
+#if defined(_M_ARM64)
+
+ void WriteInstruction(uint32_t aInstruction) {
+ const uint32_t kDelta = sizeof(uint32_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ if (mOffset + kDelta > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ *reinterpret_cast<uint32_t*>(mLocalBase + mOffset) = aInstruction;
+ mOffset += kDelta;
+ }
+
+ void WriteLoadLiteral(const uintptr_t aAddress, const uint8_t aReg) {
+ const uint32_t kDelta = sizeof(uint32_t) + sizeof(uintptr_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ // We grow the literal pool from the *end* of the tramp,
+ // so we need to ensure that there is enough room for both an instruction
+ // and a pointer
+ if (mOffset + kDelta > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mMaxOffset -= sizeof(uintptr_t);
+ *reinterpret_cast<uintptr_t*>(mLocalBase + mMaxOffset) = aAddress;
+
+ CheckedInt<intptr_t> pc(GetCurrentRemoteAddress());
+ if (!pc.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ CheckedInt<intptr_t> literal(reinterpret_cast<uintptr_t>(mLocalBase) +
+ mMaxOffset);
+ if (!literal.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ CheckedInt<intptr_t> ptrOffset = (literal - pc);
+ if (!ptrOffset.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ // ptrOffset must be properly aligned
+ MOZ_ASSERT((ptrOffset.value() % 4) == 0);
+ ptrOffset /= 4;
+
+ CheckedInt<int32_t> offset(ptrOffset.value());
+ if (!offset.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ // Ensure that offset falls within the range of a signed 19-bit value
+ if (offset.value() < -0x40000 || offset.value() > 0x3FFFF) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ const int32_t kimm19Mask = 0x7FFFF;
+ int32_t masked = offset.value() & kimm19Mask;
+
+ MOZ_ASSERT(aReg < 32);
+ uint32_t loadInstr = 0x58000000 | (masked << 5) | aReg;
+ WriteInstruction(loadInstr);
+ }
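+  // The instruction emitted above is an A64 LDR (literal): base opcode
+  // 0x58000000, a signed 19-bit *word* offset in bits [23:5], and the
+  // destination register in bits [4:0]. As a worked (hypothetical) example,
+  // a literal placed 16 bytes after the load and targeting x16 encodes as
+  // 0x58000000 | ((16 / 4) << 5) | 16 == 0x58000090.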
+
+#else
+
+ void WriteByte(uint8_t aValue) {
+ const uint32_t kDelta = sizeof(uint8_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ if (mOffset >= mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ *(mLocalBase + mOffset) = aValue;
+ ++mOffset;
+ }
+
+ void WriteInteger(int32_t aValue) {
+ const uint32_t kDelta = sizeof(int32_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ if (mOffset + kDelta > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ *reinterpret_cast<int32_t*>(mLocalBase + mOffset) = aValue;
+ mOffset += kDelta;
+ }
+
+ void WriteDisp32(uintptr_t aAbsTarget) {
+ const uint32_t kDelta = sizeof(int32_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ if (mOffset + kDelta > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ // This needs to be computed from the remote location
+ intptr_t remoteTrampPosition = static_cast<intptr_t>(mRemoteBase + mOffset);
+
+ intptr_t diff =
+ static_cast<intptr_t>(aAbsTarget) - (remoteTrampPosition + kDelta);
+
+ CheckedInt<int32_t> checkedDisp(diff);
+ MOZ_ASSERT(checkedDisp.isValid());
+ if (!checkedDisp.isValid()) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ int32_t disp = checkedDisp.value();
+ *reinterpret_cast<int32_t*>(mLocalBase + mOffset) = disp;
+ mOffset += kDelta;
+ }
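+  // Worked example with hypothetical addresses: if the displacement field
+  // lands at remote address 0x7ff600001000 and aAbsTarget is 0x7ff600003000,
+  // the stored rel32 is 0x7ff600003000 - (0x7ff600001000 + 4) == 0x1ffc;
+  // the displacement is always measured from the end of the 4-byte field as
+  // the CPU sees it in the target process.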
+
+ void WriteBytes(void* aAddr, size_t aSize) {
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += aSize;
+ return;
+ }
+
+ if (mOffset + aSize > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ std::memcpy(reinterpret_cast<void*>(mLocalBase + mOffset), aAddr, aSize);
+ mOffset += aSize;
+ }
+
+#endif
+
+ void WritePointer(uintptr_t aValue) {
+ const uint32_t kDelta = sizeof(uintptr_t);
+
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += kDelta;
+ return;
+ }
+
+ if (mOffset + kDelta > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ *reinterpret_cast<uintptr_t*>(mLocalBase + mOffset) = aValue;
+ mOffset += kDelta;
+ }
+
+ void WriteEncodedPointer(void* aValue) {
+ uintptr_t encoded = ReadOnlyTargetFunction<MMPolicy>::EncodePtr(aValue);
+ WritePointer(encoded);
+ }
+
+ Maybe<uintptr_t> ReadPointer() {
+ if (mOffset + sizeof(uintptr_t) > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return Nothing();
+ }
+
+ auto result = Some(*reinterpret_cast<uintptr_t*>(mLocalBase + mOffset));
+ mOffset += sizeof(uintptr_t);
+ return std::move(result);
+ }
+
+ Maybe<uintptr_t> ReadEncodedPointer() {
+ Maybe<uintptr_t> encoded(ReadPointer());
+ if (!encoded) {
+ return encoded;
+ }
+
+ return Some(ReadOnlyTargetFunction<MMPolicy>::DecodePtr(encoded.value()));
+ }
+
+#if defined(_M_IX86)
+ // 32-bit only
+ void AdjustDisp32AtOffset(uint32_t aOffset, uintptr_t aAbsTarget) {
+ uint32_t effectiveOffset = mExeOffset + aOffset;
+
+ if (effectiveOffset + sizeof(int32_t) > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ intptr_t diff = static_cast<intptr_t>(aAbsTarget) -
+ static_cast<intptr_t>(mRemoteBase + mExeOffset);
+ *reinterpret_cast<int32_t*>(mLocalBase + effectiveOffset) += diff;
+ }
+#endif // defined(_M_IX86)
+
+ void CopyFrom(uintptr_t aOrigBytes, uint32_t aNumBytes) {
+ if (!mMMPolicy) {
+ // Null tramp, just track offset
+ mOffset += aNumBytes;
+ return;
+ }
+
+    if (mOffset + aNumBytes > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ if (!mMMPolicy->Read(mLocalBase + mOffset,
+ reinterpret_cast<void*>(aOrigBytes), aNumBytes)) {
+ mAccumulatedStatus = false;
+ return;
+ }
+
+ mOffset += aNumBytes;
+ }
+
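+  // CopyCodes is CopyFrom plus, on x64, bookkeeping of how far the
+  // verbatim-copied instruction bytes extend (mCopyCodesEndOffset), which
+  // AddUnwindInfo later uses to check that the detoured prologue was copied
+  // unmodified.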
+ void CopyCodes(uintptr_t aOrigBytes, uint32_t aNumBytes) {
+#ifdef _M_X64
+ if (mOffset == mCopyCodesEndOffset) {
+ mCopyCodesEndOffset += aNumBytes;
+ }
+#endif // _M_X64
+ CopyFrom(aOrigBytes, aNumBytes);
+ }
+
+ void Rewind() { mOffset = 0; }
+
+ uintptr_t GetCurrentRemoteAddress() const { return mRemoteBase + mOffset; }
+
+ void StartExecutableCode() {
+ MOZ_ASSERT(!mExeOffset);
+ mExeOffset = mOffset;
+#ifdef _M_X64
+ mCopyCodesEndOffset = mOffset;
+#endif // _M_X64
+ }
+
+ void* EndExecutableCode() {
+ if (!mAccumulatedStatus || !mMMPolicy) {
+ return nullptr;
+ }
+
+#ifdef _M_X64
+ mExeEndOffset = mOffset;
+#endif // _M_X64
+
+    // This must always return the start address of the executable code
+    // *in the target process*.
+ return reinterpret_cast<void*>(mRemoteBase + mExeOffset);
+ }
+
+ uint32_t GetCurrentExecutableCodeLen() const { return mOffset - mExeOffset; }
+
+#ifdef _M_X64
+
+ void Align(uint32_t aAlignment) {
+ // aAlignment should be a power of 2
+ MOZ_ASSERT(!(aAlignment & (aAlignment - 1)));
+
+ uint32_t alignedOffset = (mOffset + aAlignment - 1) & ~(aAlignment - 1);
+ if (alignedOffset > mMaxOffset) {
+ mAccumulatedStatus = false;
+ return;
+ }
+ mOffset = alignedOffset;
+ }
+
+ // We assume that all instructions that are part of the prologue are left
+ // intact by detouring code, i.e. that they are copied using CopyCodes. This
+ // is not true for calls and jumps for example, but calls and jumps cannot be
+ // part of the prologue. This assumption allows us to copy unwind information
+ // as-is, because unwind information only refers to instructions within the
+ // prologue.
+ bool AddUnwindInfo(uintptr_t aOrigFuncAddr, uintptr_t aOrigFuncStopOffset) {
+ if constexpr (!MMPolicy::kSupportsUnwindInfo) {
+ return false;
+ }
+
+ if (!mMMPolicy) {
+ return false;
+ }
+
+ uint32_t origFuncOffsetFromBeginAddr = 0;
+ uint32_t origFuncOffsetToEndAddr = 0;
+ uintptr_t origImageBase = 0;
+ auto unwindInfoData =
+ mMMPolicy->LookupUnwindInfo(aOrigFuncAddr, &origFuncOffsetFromBeginAddr,
+ &origFuncOffsetToEndAddr, &origImageBase);
+ if (!unwindInfoData) {
+ // If the original function does not have unwind info, there is nothing
+ // more to do.
+ return true;
+ }
+
+ // We do not support hooking at a location that isn't the beginning of a
+ // function.
+ MOZ_ASSERT(origFuncOffsetFromBeginAddr == 0);
+ if (origFuncOffsetFromBeginAddr != 0) {
+ return false;
+ }
+
+ IterableUnwindInfo unwindInfoIt(unwindInfoData.get());
+ auto& unwindInfo = unwindInfoIt.Info();
+
+ // The prologue should contain only instructions that we detour using
+ // CopyCodes. If not, there is most likely a mismatch between the unwind
+ // information and the actual code we are detouring, so we stop here. This
+ // is a best-effort safeguard intended to detect situations where e.g.
+ // third-party injected code would have altered the function we are
+ // detouring.
+ if (mCopyCodesEndOffset < aOrigFuncStopOffset &&
+ unwindInfo.size_of_prolog > mCopyCodesEndOffset) {
+ return false;
+ }
+
+ // According to the documentation, the array is sorted by descending order
+    // of offset in the prologue. Let's double-check this assumption in debug
+    // builds. This also checks that the full unwind information isn't
+ // ill-formed, thanks to all the MOZ_ASSERT in iteration code.
+# ifdef DEBUG
+ uint8_t previousOffset = 0xFF;
+ for (const auto& unwindCode : unwindInfoIt) {
+ MOZ_ASSERT(unwindCode.offset_in_prolog <= previousOffset);
+ previousOffset = unwindCode.offset_in_prolog;
+ }
+# endif // DEBUG
+
+ // We skip entries that are not part of the code we have detoured.
+ // This code relies on the array being sorted by descending order of offset
+ // in the prolog.
+ uint8_t firstRelevantCode = 0;
+ uint8_t countOfCodes = 0;
+ auto it = unwindInfoIt.begin();
+ for (; it != unwindInfoIt.end(); ++it) {
+ const auto& unwindCode = *it;
+ if (unwindCode.offset_in_prolog <= aOrigFuncStopOffset) {
+ // Found a relevant entry
+ firstRelevantCode = it.Index();
+ countOfCodes = unwindInfo.count_of_codes - firstRelevantCode;
+ break;
+ }
+ }
+
+ // Check that we encountered no ill-formed unwind codes.
+ if (!it.IsValid() && !it.IsAtEnd()) {
+ return false;
+ }
+
+ // We do not support chained unwind info. We should add support for chained
+ // unwind info if we ever reach this assert. Since we hook functions at
+ // their start address, this should not happen.
+ if (unwindInfo.flags & UNW_FLAG_CHAININFO) {
+ MOZ_ASSERT(
+ false,
+ "Tried to detour at a location with chained unwind information");
+ return false;
+ }
+
+ // We do not support exception handler info either. This could be a problem
+ // if we detour code that does not belong to the prologue and contains a
+ // call instruction, as this handler would then not be found if unwinding
+ // from callees. The following assert checks that this does not happen.
+ //
+ // Our current assumption is that all the functions we hook either have no
+ // associated exception handlers, or it is __GSHandlerCheck. This handler
+ // is the most commonly found, for example it is present in LdrLoadDll,
+ // SendMessageTimeoutW, GetWindowInfo. It is added to functions that use
+ // stack buffers, in order to mitigate stack buffer overflows. We explain
+ // below why it is not a problem that we do not preserve __GSHandlerCheck
+ // information when we detour code.
+ //
+ // Preserving exception handler information would raise two challenges:
+ //
+ // (1) if the exception handler was not written in a generic way, it may
+ // behave differently when called for our detoured code compared to
+ // what it would do if called from the original location of the code;
+ // (2) the exception handler can be followed by handler-specific data,
+ // which we cannot copy because we do not know its size.
+ //
+ // __GSHandlerCheck checks that the stack cookie value wasn't overwritten
+ // before continuing to unwind and call further handlers. That is a
+ // security feature that we want to preserve. However, since these
+ // functions allocate stack space and write the stack cookie as part of
+ // their prologue, the 13 bytes that we detour are necessarily part of
+ // their prologue, which must contain at least the following instructions:
+ //
+ // 48 81 ec XX XX XX XX sub rsp, 0xXXXXXXXX
+ // 48 8b 05 XX XX XX XX mov rax, qword ptr [rip+__security_cookie]
+ // 48 33 c4 xor rax, rsp
+ // 48 89 84 24 XX XX XX XX mov qword ptr [RSP + 0xXXXXXXXX],RAX
+ //
+ // As a consequence, code associated with __GSHandlerCheck will necessarily
+ // satisfy (aOrigFuncStopOffset <= unwindInfo.size_of_prolog), and it is OK
+ // to not preserve handler info in that case.
+# ifdef DEBUG
+ if (aOrigFuncStopOffset > unwindInfo.size_of_prolog) {
+ MOZ_ASSERT(!(unwindInfo.flags & (UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER)));
+ }
+# endif // DEBUG
+
+ // The unwind info must be DWORD-aligned
+ Align(sizeof(uint32_t));
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+ uintptr_t unwindInfoOffset = mOffset;
+
+ unwindInfo.flags &=
+ ~(UNW_FLAG_CHAININFO | UNW_FLAG_EHANDLER | UNW_FLAG_UHANDLER);
+ unwindInfo.count_of_codes = countOfCodes;
+ if (aOrigFuncStopOffset < unwindInfo.size_of_prolog) {
+ unwindInfo.size_of_prolog = aOrigFuncStopOffset;
+ }
+
+ WriteBytes(reinterpret_cast<void*>(&unwindInfo),
+ offsetof(UnwindInfo, unwind_code));
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+
+ WriteBytes(
+ reinterpret_cast<void*>(&unwindInfo.unwind_code[firstRelevantCode]),
+ countOfCodes * sizeof(UnwindCode));
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+
+ // The function table must be DWORD-aligned
+ Align(sizeof(uint32_t));
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+ uintptr_t functionTableOffset = mOffset;
+
+ WriteInteger(mExeOffset);
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+
+ WriteInteger(mExeEndOffset);
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+
+ WriteInteger(unwindInfoOffset);
+ if (!mAccumulatedStatus) {
+ return false;
+ }
+
+ return mMMPolicy->AddFunctionTable(mRemoteBase + functionTableOffset, 1,
+ mRemoteBase);
+ }
+
+#endif // _M_X64
+
+ Trampoline<MMPolicy>& operator--() {
+ MOZ_ASSERT(mOffset);
+ --mOffset;
+ return *this;
+ }
+
+ private:
+ void Clear() {
+ if (!mLocalBase || !mPrevLocalProt) {
+ return;
+ }
+
+ DebugOnly<bool> ok = !!::VirtualProtect(mLocalBase, mMaxOffset,
+ mPrevLocalProt, &mPrevLocalProt);
+ MOZ_ASSERT(ok);
+
+ mLocalBase = nullptr;
+ mRemoteBase = 0;
+ mPrevLocalProt = 0;
+ mAccumulatedStatus = false;
+ }
+
+ private:
+ const MMPolicy* mMMPolicy;
+ DWORD mPrevLocalProt;
+ uint8_t* mLocalBase;
+ uintptr_t mRemoteBase;
+ uint32_t mOffset;
+ uint32_t mExeOffset;
+#ifdef _M_X64
+ uint32_t mCopyCodesEndOffset;
+ uint32_t mExeEndOffset;
+#endif // _M_X64
+ uint32_t mMaxOffset;
+ bool mAccumulatedStatus;
+};
+
+template <typename MMPolicy>
+class MOZ_STACK_CLASS TrampolineCollection final {
+ public:
+ class MOZ_STACK_CLASS TrampolineIterator final {
+ public:
+ Trampoline<MMPolicy> operator*() {
+ uint32_t offset = mCurTramp * mCollection.mTrampSize;
+ return Trampoline<MMPolicy>(
+ &mCollection.mMMPolicy, mCollection.mLocalBase + offset,
+ mCollection.mRemoteBase + offset, mCollection.mTrampSize);
+ }
+
+ TrampolineIterator& operator++() {
+ ++mCurTramp;
+ return *this;
+ }
+
+ bool operator!=(const TrampolineIterator& aOther) const {
+ return mCurTramp != aOther.mCurTramp;
+ }
+
+ private:
+ explicit TrampolineIterator(
+ const TrampolineCollection<MMPolicy>& aCollection,
+ const uint32_t aCurTramp = 0)
+ : mCollection(aCollection), mCurTramp(aCurTramp) {}
+
+ const TrampolineCollection<MMPolicy>& mCollection;
+ uint32_t mCurTramp;
+
+ friend class TrampolineCollection<MMPolicy>;
+ };
+
+ explicit TrampolineCollection(const MMPolicy& aMMPolicy)
+ : mMMPolicy(aMMPolicy),
+ mLocalBase(0),
+ mRemoteBase(0),
+ mTrampSize(0),
+ mNumTramps(0),
+ mPrevProt(0),
+ mCS(nullptr) {}
+
+ TrampolineCollection(const MMPolicy& aMMPolicy, uint8_t* const aLocalBase,
+ const uintptr_t aRemoteBase, const uint32_t aTrampSize,
+ const uint32_t aNumTramps)
+ : mMMPolicy(aMMPolicy),
+ mLocalBase(aLocalBase),
+ mRemoteBase(aRemoteBase),
+ mTrampSize(aTrampSize),
+ mNumTramps(aNumTramps),
+ mPrevProt(0),
+ mCS(nullptr) {
+ if (!aNumTramps) {
+ return;
+ }
+
+ BOOL ok = mMMPolicy.Protect(aLocalBase, aNumTramps * aTrampSize,
+ PAGE_EXECUTE_READWRITE, &mPrevProt);
+ if (!ok) {
+ // When destroying a sandboxed process that uses
+ // MITIGATION_DYNAMIC_CODE_DISABLE, we won't be allowed to write to our
+ // executable memory so we just do nothing. If we fail to get access
+ // to memory for any other reason, we still don't want to crash but we
+ // do assert.
+ MOZ_ASSERT(IsDynamicCodeDisabled());
+ mNumTramps = 0;
+ mPrevProt = 0;
+ }
+ }
+
+ ~TrampolineCollection() {
+ if (!mPrevProt) {
+ return;
+ }
+
+ mMMPolicy.Protect(mLocalBase, mNumTramps * mTrampSize, mPrevProt,
+ &mPrevProt);
+
+ if (mCS) {
+ ::LeaveCriticalSection(mCS);
+ }
+ }
+
+ void Lock(CRITICAL_SECTION& aCS) {
+ if (!mPrevProt || mCS) {
+ return;
+ }
+
+ mCS = &aCS;
+ ::EnterCriticalSection(&aCS);
+ }
+
+ TrampolineIterator begin() const {
+ if (!mPrevProt) {
+ return end();
+ }
+
+ return TrampolineIterator(*this);
+ }
+
+ TrampolineIterator end() const {
+ return TrampolineIterator(*this, mNumTramps);
+ }
+
+ TrampolineCollection(const TrampolineCollection&) = delete;
+ TrampolineCollection& operator=(const TrampolineCollection&) = delete;
+ TrampolineCollection& operator=(TrampolineCollection&&) = delete;
+
+ TrampolineCollection(TrampolineCollection&& aOther)
+ : mMMPolicy(aOther.mMMPolicy),
+ mLocalBase(aOther.mLocalBase),
+ mRemoteBase(aOther.mRemoteBase),
+ mTrampSize(aOther.mTrampSize),
+ mNumTramps(aOther.mNumTramps),
+ mPrevProt(aOther.mPrevProt),
+ mCS(aOther.mCS) {
+ aOther.mPrevProt = 0;
+ aOther.mCS = nullptr;
+ }
+
+ private:
+ const MMPolicy& mMMPolicy;
+ uint8_t* const mLocalBase;
+ const uintptr_t mRemoteBase;
+ const uint32_t mTrampSize;
+ uint32_t mNumTramps;
+ uint32_t mPrevProt;
+ CRITICAL_SECTION* mCS;
+
+ friend class TrampolineIterator;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_Trampoline_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/VMSharingPolicies.h b/toolkit/xre/dllservices/mozglue/interceptor/VMSharingPolicies.h
new file mode 100644
index 0000000000..8f93f5c1ad
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/VMSharingPolicies.h
@@ -0,0 +1,285 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim: set ts=8 sts=2 et sw=2 tw=80: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at https://mozilla.org/MPL/2.0/. */
+
+#ifndef mozilla_interceptor_VMSharingPolicies_h
+#define mozilla_interceptor_VMSharingPolicies_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Types.h"
+
+namespace mozilla {
+namespace interceptor {
+
+/**
+ * This class is an abstraction of a reservation of virtual address space that
+ * has been obtained from a VMSharingPolicy via the policy's |Reserve| method.
+ *
+ * TrampolinePool allows us to obtain a trampoline without needing to concern
+ * ourselves with the underlying implementation of the VM sharing policy.
+ *
+ * For example, VMSharingPolicyShared delegates to VMSharingPolicyUnique, but
+ * also requires taking a lock before doing so. By invoking |GetNextTrampoline|
+ * on a TrampolinePool, the caller does not need to concern themselves with
+ * that detail.
+ *
+ * We accomplish this with a recursive implementation: the inner
+ * TrampolinePool is supplied by the delegated VMSharingPolicy.
+ */
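+/*
+ * A rough usage sketch (hypothetical names; assumes an existing
+ * VMSharingPolicyUnique<MMPolicyInProcess> instance named vmPolicy):
+ *
+ *   using PoolT = VMSharingPolicyUnique<MMPolicyInProcess>::PoolType;
+ *   Maybe<PoolT> pool = vmPolicy.Reserve(pivotAddr, maxDistanceFromPivot);
+ *   if (pool) {
+ *     Maybe<Trampoline<MMPolicyInProcess>> tramp = pool->GetNextTrampoline();
+ *   }
+ */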
+template <typename VMPolicyT, typename InnerT>
+class MOZ_STACK_CLASS TrampolinePool final {
+ public:
+ TrampolinePool(TrampolinePool&& aOther) = default;
+
+ TrampolinePool(VMPolicyT& aVMPolicy, InnerT&& aInner)
+ : mVMPolicy(aVMPolicy), mInner(std::move(aInner)) {}
+
+ TrampolinePool& operator=(TrampolinePool&& aOther) = delete;
+ TrampolinePool(const TrampolinePool&) = delete;
+ TrampolinePool& operator=(const TrampolinePool&) = delete;
+
+ using MMPolicyT = typename VMPolicyT::MMPolicyT;
+
+ Maybe<Trampoline<MMPolicyT>> GetNextTrampoline() {
+ return mVMPolicy.GetNextTrampoline(mInner);
+ }
+
+#if defined(_M_X64)
+ bool IsInLowest2GB() const {
+ return mVMPolicy.IsTrampolineSpaceInLowest2GB(mInner);
+ }
+#endif // defined(_M_X64)
+
+ private:
+ VMPolicyT& mVMPolicy;
+ InnerT mInner;
+};
+
+/**
+ * This specialization is the base case for TrampolinePool, and is used by
+ * VMSharingPolicyUnique (since that policy does not delegate anything).
+ */
+template <typename VMPolicyT>
+class MOZ_STACK_CLASS TrampolinePool<VMPolicyT, decltype(nullptr)> final {
+ public:
+ explicit TrampolinePool(VMPolicyT& aVMPolicy) : mVMPolicy(aVMPolicy) {}
+
+ TrampolinePool(TrampolinePool&& aOther) = default;
+
+ TrampolinePool& operator=(TrampolinePool&& aOther) = delete;
+ TrampolinePool(const TrampolinePool&) = delete;
+ TrampolinePool& operator=(const TrampolinePool&) = delete;
+
+ using MMPolicyT = typename VMPolicyT::MMPolicyT;
+
+ Maybe<Trampoline<MMPolicyT>> GetNextTrampoline() {
+ return mVMPolicy.GetNextTrampoline();
+ }
+
+#if defined(_M_X64)
+ bool IsInLowest2GB() const {
+ return mVMPolicy.IsTrampolineSpaceInLowest2GB();
+ }
+#endif // defined(_M_X64)
+
+ private:
+ VMPolicyT& mVMPolicy;
+};
+
+template <typename MMPolicy>
+class VMSharingPolicyUnique : public MMPolicy {
+ using ThisType = VMSharingPolicyUnique<MMPolicy>;
+
+ public:
+ using PoolType = TrampolinePool<ThisType, decltype(nullptr)>;
+
+ template <typename... Args>
+ explicit VMSharingPolicyUnique(Args&&... aArgs)
+ : MMPolicy(std::forward<Args>(aArgs)...), mNextChunkIndex(0) {}
+
+ Maybe<PoolType> Reserve(const uintptr_t aPivotAddr,
+ const uint32_t aMaxDistanceFromPivot) {
+ // Win32 allocates VM addresses at a 64KiB granularity, so we might as well
+ // utilize that entire 64KiB reservation.
+ uint32_t len = MMPolicy::GetAllocGranularity();
+
+ Maybe<Span<const uint8_t>> maybeBounds = MMPolicy::SpanFromPivotAndDistance(
+ len, aPivotAddr, aMaxDistanceFromPivot);
+
+ return Reserve(len, maybeBounds);
+ }
+
+ Maybe<PoolType> Reserve(const uint32_t aSize,
+ const Maybe<Span<const uint8_t>>& aBounds) {
+ uint32_t bytesReserved = MMPolicy::Reserve(aSize, aBounds);
+ if (!bytesReserved) {
+ return Nothing();
+ }
+
+ return Some(PoolType(*this));
+ }
+
+ TrampolineCollection<MMPolicy> Items() const {
+ return TrampolineCollection<MMPolicy>(*this, this->GetLocalView(),
+ this->GetRemoteView(), kChunkSize,
+ mNextChunkIndex);
+ }
+
+ void Clear() { mNextChunkIndex = 0; }
+
+ ~VMSharingPolicyUnique() = default;
+
+ VMSharingPolicyUnique(const VMSharingPolicyUnique&) = delete;
+ VMSharingPolicyUnique& operator=(const VMSharingPolicyUnique&) = delete;
+
+ VMSharingPolicyUnique(VMSharingPolicyUnique&& aOther)
+ : MMPolicy(std::move(aOther)), mNextChunkIndex(aOther.mNextChunkIndex) {
+ aOther.mNextChunkIndex = 0;
+ }
+
+ VMSharingPolicyUnique& operator=(VMSharingPolicyUnique&& aOther) {
+ static_cast<MMPolicy&>(*this) = std::move(aOther);
+ mNextChunkIndex = aOther.mNextChunkIndex;
+ aOther.mNextChunkIndex = 0;
+ return *this;
+ }
+
+ protected:
+ // In VMSharingPolicyUnique we do not implement the overload that accepts
+ // an inner trampoline pool, as this policy is expected to be the
+ // implementation of the base case.
+ Maybe<Trampoline<MMPolicy>> GetNextTrampoline() {
+ uint32_t offset = mNextChunkIndex * kChunkSize;
+ if (!this->MaybeCommitNextPage(offset, kChunkSize)) {
+ return Nothing();
+ }
+
+ Trampoline<MMPolicy> result(this, this->GetLocalView() + offset,
+ this->GetRemoteView() + offset, kChunkSize);
+ if (!!result) {
+ ++mNextChunkIndex;
+ }
+
+ return Some(std::move(result));
+ }
+
+ private:
+ uint32_t mNextChunkIndex;
+ static const uint32_t kChunkSize = 128;
+
+ template <typename VMPolicyT, typename FriendT>
+ friend class TrampolinePool;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+// We don't include RangeMap.h until this point because it depends on the
+// TrampolinePool definitions from above.
+#include "mozilla/interceptor/RangeMap.h"
+
+namespace mozilla {
+namespace interceptor {
+
+// We only support this policy for in-proc MMPolicy.
+class MOZ_TRIVIAL_CTOR_DTOR VMSharingPolicyShared : public MMPolicyInProcess {
+ typedef VMSharingPolicyUnique<MMPolicyInProcess> UniquePolicyT;
+ typedef VMSharingPolicyShared ThisType;
+
+ public:
+ using PoolType = TrampolinePool<ThisType, UniquePolicyT::PoolType>;
+ using MMPolicyT = MMPolicyInProcess;
+
+ constexpr VMSharingPolicyShared() {}
+
+ bool ShouldUnhookUponDestruction() const { return false; }
+
+ Maybe<PoolType> Reserve(const uintptr_t aPivotAddr,
+ const uint32_t aMaxDistanceFromPivot) {
+ // Win32 allocates VM addresses at a 64KiB granularity, so we might as well
+ // utilize that entire 64KiB reservation.
+ uint32_t len = this->GetAllocGranularity();
+
+ Maybe<Span<const uint8_t>> maybeBounds =
+ MMPolicyInProcess::SpanFromPivotAndDistance(len, aPivotAddr,
+ aMaxDistanceFromPivot);
+
+ AutoCriticalSection lock(GetCS());
+ VMSharingPolicyUnique<MMPolicyT>* uniquePol = sVMMap.GetPolicy(maybeBounds);
+ MOZ_ASSERT(uniquePol);
+ if (!uniquePol) {
+ return Nothing();
+ }
+
+ Maybe<UniquePolicyT::PoolType> maybeUnique =
+ uniquePol->Reserve(len, maybeBounds);
+ if (!maybeUnique) {
+ return Nothing();
+ }
+
+ return Some(PoolType(*this, std::move(maybeUnique.ref())));
+ }
+
+ TrampolineCollection<MMPolicyInProcess> Items() const {
+ // Since ShouldUnhookUponDestruction returns false, this can be empty
+ return TrampolineCollection<MMPolicyInProcess>(*this);
+ }
+
+ void Clear() {
+ // This must be a no-op for shared VM policy; we can't have one interceptor
+ // wiping out trampolines for all interceptors in the process.
+ }
+
+ VMSharingPolicyShared(const VMSharingPolicyShared&) = delete;
+ VMSharingPolicyShared(VMSharingPolicyShared&&) = delete;
+ VMSharingPolicyShared& operator=(const VMSharingPolicyShared&) = delete;
+ VMSharingPolicyShared& operator=(VMSharingPolicyShared&&) = delete;
+
+ private:
+ static CRITICAL_SECTION* GetCS() {
+ static const bool isAlloc = []() -> bool {
+ DWORD flags = 0;
+#if defined(RELEASE_OR_BETA)
+ flags |= CRITICAL_SECTION_NO_DEBUG_INFO;
+#endif // defined(RELEASE_OR_BETA)
+ ::InitializeCriticalSectionEx(&sCS, 4000, flags);
+ return true;
+ }();
+ Unused << isAlloc;
+
+ return &sCS;
+ }
+
+ // In VMSharingPolicyShared, we only implement the overload that accepts
+ // a VMSharingPolicyUnique trampoline pool as |aInner|, since we require the
+ // former policy to wrap the latter.
+ Maybe<Trampoline<MMPolicyInProcess>> GetNextTrampoline(
+ UniquePolicyT::PoolType& aInner) {
+ AutoCriticalSection lock(GetCS());
+ return aInner.GetNextTrampoline();
+ }
+
+#if defined(_M_X64)
+ bool IsTrampolineSpaceInLowest2GB(
+ const UniquePolicyT::PoolType& aInner) const {
+ AutoCriticalSection lock(GetCS());
+ return aInner.IsInLowest2GB();
+ }
+#endif // defined(_M_X64)
+
+ private:
+ template <typename VMPolicyT, typename InnerT>
+ friend class TrampolinePool;
+
+ inline static RangeMap<MMPolicyInProcess> sVMMap;
+ inline static CRITICAL_SECTION sCS;
+};
+
+} // namespace interceptor
+} // namespace mozilla
+
+#endif // mozilla_interceptor_VMSharingPolicies_h
diff --git a/toolkit/xre/dllservices/mozglue/interceptor/moz.build b/toolkit/xre/dllservices/mozglue/interceptor/moz.build
new file mode 100644
index 0000000000..561e33b147
--- /dev/null
+++ b/toolkit/xre/dllservices/mozglue/interceptor/moz.build
@@ -0,0 +1,26 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+EXPORTS.mozilla.interceptor += [
+ "Arm64.h",
+ "MMPolicies.h",
+ "PatcherBase.h",
+ "PatcherDetour.h",
+ "PatcherNopSpace.h",
+ "RangeMap.h",
+ "TargetFunction.h",
+ "Trampoline.h",
+ "VMSharingPolicies.h",
+]
+
+if CONFIG["CPU_ARCH"] == "aarch64":
+ Library("interceptor")
+
+ FINAL_LIBRARY = "mozglue"
+
+ UNIFIED_SOURCES += [
+ "Arm64.cpp",
+ ]