Diffstat (limited to 'js/src/jit/arm64/vixl')
-rw-r--r--  js/src/jit/arm64/vixl/.clang-format  4
-rw-r--r--  js/src/jit/arm64/vixl/Assembler-vixl.cpp  5145
-rw-r--r--  js/src/jit/arm64/vixl/Assembler-vixl.h  4327
-rw-r--r--  js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h  179
-rw-r--r--  js/src/jit/arm64/vixl/Constants-vixl.h  2694
-rw-r--r--  js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp  231
-rw-r--r--  js/src/jit/arm64/vixl/Cpu-Features-vixl.h  397
-rw-r--r--  js/src/jit/arm64/vixl/Cpu-vixl.cpp  252
-rw-r--r--  js/src/jit/arm64/vixl/Cpu-vixl.h  241
-rw-r--r--  js/src/jit/arm64/vixl/Debugger-vixl.cpp  1535
-rw-r--r--  js/src/jit/arm64/vixl/Debugger-vixl.h  117
-rw-r--r--  js/src/jit/arm64/vixl/Decoder-vixl.cpp  878
-rw-r--r--  js/src/jit/arm64/vixl/Decoder-vixl.h  275
-rw-r--r--  js/src/jit/arm64/vixl/Disasm-vixl.cpp  3519
-rw-r--r--  js/src/jit/arm64/vixl/Disasm-vixl.h  181
-rw-r--r--  js/src/jit/arm64/vixl/Globals-vixl.h  272
-rw-r--r--  js/src/jit/arm64/vixl/Instructions-vixl.cpp  642
-rw-r--r--  js/src/jit/arm64/vixl/Instructions-vixl.h  805
-rw-r--r--  js/src/jit/arm64/vixl/Instrument-vixl.cpp  844
-rw-r--r--  js/src/jit/arm64/vixl/Instrument-vixl.h  109
-rw-r--r--  js/src/jit/arm64/vixl/Logic-vixl.cpp  4738
-rw-r--r--  js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp  2027
-rw-r--r--  js/src/jit/arm64/vixl/MacroAssembler-vixl.h  2516
-rw-r--r--  js/src/jit/arm64/vixl/MozAssembler-vixl.cpp  610
-rw-r--r--  js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h  356
-rw-r--r--  js/src/jit/arm64/vixl/MozCachingDecoder.h  179
-rw-r--r--  js/src/jit/arm64/vixl/MozCpu-vixl.cpp  226
-rw-r--r--  js/src/jit/arm64/vixl/MozInstructions-vixl.cpp  211
-rw-r--r--  js/src/jit/arm64/vixl/MozSimulator-vixl.cpp  1239
-rw-r--r--  js/src/jit/arm64/vixl/Platform-vixl.h  39
-rw-r--r--  js/src/jit/arm64/vixl/README.md  7
-rw-r--r--  js/src/jit/arm64/vixl/Simulator-Constants-vixl.h  140
-rw-r--r--  js/src/jit/arm64/vixl/Simulator-vixl.cpp  3993
-rw-r--r--  js/src/jit/arm64/vixl/Simulator-vixl.h  2571
-rw-r--r--  js/src/jit/arm64/vixl/Utils-vixl.cpp  555
-rw-r--r--  js/src/jit/arm64/vixl/Utils-vixl.h  1283
36 files changed, 43337 insertions, 0 deletions
diff --git a/js/src/jit/arm64/vixl/.clang-format b/js/src/jit/arm64/vixl/.clang-format
new file mode 100644
index 0000000000..122a79540d
--- /dev/null
+++ b/js/src/jit/arm64/vixl/.clang-format
@@ -0,0 +1,4 @@
+BasedOnStyle: Chromium
+
+# Ignore all comments because they aren't reflowed properly.
+CommentPragmas: "^"
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.cpp b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
new file mode 100644
index 0000000000..7f6cd0f2da
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.cpp
@@ -0,0 +1,5145 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+#include <cmath>
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+namespace vixl {
+
+// CPURegList utilities.
+CPURegister CPURegList::PopLowestIndex() {
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountTrailingZeros(list_);
+ VIXL_ASSERT((1ULL << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+CPURegister CPURegList::PopHighestIndex() {
+ VIXL_ASSERT(IsValid());
+ if (IsEmpty()) {
+ return NoCPUReg;
+ }
+ int index = CountLeadingZeros(list_);
+ index = kRegListSizeInBits - 1 - index;
+ VIXL_ASSERT((1ULL << index) & list_);
+ Remove(index);
+ return CPURegister(index, size_, type_);
+}
+
+
+bool CPURegList::IsValid() const {
+ if ((type_ == CPURegister::kRegister) ||
+ (type_ == CPURegister::kVRegister)) {
+ bool is_valid = true;
+ // Try to create a CPURegister for each element in the list.
+ for (int i = 0; i < kRegListSizeInBits; i++) {
+ if (((list_ >> i) & 1) != 0) {
+ is_valid &= CPURegister(i, size_, type_).IsValid();
+ }
+ }
+ return is_valid;
+ } else if (type_ == CPURegister::kNoRegister) {
+ // We can't use IsEmpty here because that asserts IsValid().
+ return list_ == 0;
+ } else {
+ return false;
+ }
+}
+
+
+void CPURegList::RemoveCalleeSaved() {
+ if (type() == CPURegister::kRegister) {
+ Remove(GetCalleeSaved(RegisterSizeInBits()));
+ } else if (type() == CPURegister::kVRegister) {
+ Remove(GetCalleeSavedV(RegisterSizeInBits()));
+ } else {
+ VIXL_ASSERT(type() == CPURegister::kNoRegister);
+ VIXL_ASSERT(IsEmpty());
+ // The list must already be empty, so do nothing.
+ }
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Union(list_1, Union(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Union(Union(list_1, list_2), Union(list_3, list_4));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3) {
+ return Intersection(list_1, Intersection(list_2, list_3));
+}
+
+
+CPURegList CPURegList::Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4) {
+ return Intersection(Intersection(list_1, list_2),
+ Intersection(list_3, list_4));
+}
+
+
+CPURegList CPURegList::GetCalleeSaved(unsigned size) {
+ return CPURegList(CPURegister::kRegister, size, 19, 29);
+}
+
+
+CPURegList CPURegList::GetCalleeSavedV(unsigned size) {
+ return CPURegList(CPURegister::kVRegister, size, 8, 15);
+}
+
+
+CPURegList CPURegList::GetCallerSaved(unsigned size) {
+ // Registers x0-x18 and lr (x30) are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
+ // Do not use lr directly to avoid initialisation order fiasco bugs for users.
+ list.Combine(Register(30, kXRegSize));
+ return list;
+}
+
+
+CPURegList CPURegList::GetCallerSavedV(unsigned size) {
+ // Registers d0-d7 and d16-d31 are caller-saved.
+ CPURegList list = CPURegList(CPURegister::kVRegister, size, 0, 7);
+ list.Combine(CPURegList(CPURegister::kVRegister, size, 16, 31));
+ return list;
+}
+
+
+const CPURegList kCalleeSaved = CPURegList::GetCalleeSaved();
+const CPURegList kCalleeSavedV = CPURegList::GetCalleeSavedV();
+const CPURegList kCallerSaved = CPURegList::GetCallerSaved();
+const CPURegList kCallerSavedV = CPURegList::GetCallerSavedV();
+
+
+// Registers.
+#define WREG(n) w##n,
+const Register Register::wregisters[] = {
+REGISTER_CODE_LIST(WREG)
+};
+#undef WREG
+
+#define XREG(n) x##n,
+const Register Register::xregisters[] = {
+REGISTER_CODE_LIST(XREG)
+};
+#undef XREG
+
+#define BREG(n) b##n,
+const VRegister VRegister::bregisters[] = {
+REGISTER_CODE_LIST(BREG)
+};
+#undef BREG
+
+#define HREG(n) h##n,
+const VRegister VRegister::hregisters[] = {
+REGISTER_CODE_LIST(HREG)
+};
+#undef HREG
+
+#define SREG(n) s##n,
+const VRegister VRegister::sregisters[] = {
+REGISTER_CODE_LIST(SREG)
+};
+#undef SREG
+
+#define DREG(n) d##n,
+const VRegister VRegister::dregisters[] = {
+REGISTER_CODE_LIST(DREG)
+};
+#undef DREG
+
+#define QREG(n) q##n,
+const VRegister VRegister::qregisters[] = {
+REGISTER_CODE_LIST(QREG)
+};
+#undef QREG
+
+#define VREG(n) v##n,
+const VRegister VRegister::vregisters[] = {
+REGISTER_CODE_LIST(VREG)
+};
+#undef VREG
+
+
+const Register& Register::WRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return wsp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return wregisters[code];
+ }
+}
+
+
+const Register& Register::XRegFromCode(unsigned code) {
+ if (code == kSPRegInternalCode) {
+ return sp;
+ } else {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ return xregisters[code];
+ }
+}
+
+
+const VRegister& VRegister::BRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return bregisters[code];
+}
+
+
+const VRegister& VRegister::HRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return hregisters[code];
+}
+
+
+const VRegister& VRegister::SRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return sregisters[code];
+}
+
+
+const VRegister& VRegister::DRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return dregisters[code];
+}
+
+
+const VRegister& VRegister::QRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return qregisters[code];
+}
+
+
+const VRegister& VRegister::VRegFromCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vregisters[code];
+}
+
+
+const Register& CPURegister::W() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::WRegFromCode(code_);
+}
+
+
+const Register& CPURegister::X() const {
+ VIXL_ASSERT(IsValidRegister());
+ return Register::XRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::B() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::BRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::H() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::HRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::S() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::SRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::D() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::DRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::Q() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::QRegFromCode(code_);
+}
+
+
+const VRegister& CPURegister::V() const {
+ VIXL_ASSERT(IsValidVRegister());
+ return VRegister::VRegFromCode(code_);
+}
+
+
+// Operand.
+Operand::Operand(int64_t immediate)
+ : immediate_(immediate),
+ reg_(NoReg),
+ shift_(NO_SHIFT),
+ extend_(NO_EXTEND),
+ shift_amount_(0) {}
+
+
+Operand::Operand(Register reg, Shift shift, unsigned shift_amount)
+ : reg_(reg),
+ shift_(shift),
+ extend_(NO_EXTEND),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(shift != MSL);
+ VIXL_ASSERT(reg.Is64Bits() || (shift_amount < kWRegSize));
+ VIXL_ASSERT(reg.Is32Bits() || (shift_amount < kXRegSize));
+ VIXL_ASSERT(!reg.IsSP());
+}
+
+
+Operand::Operand(Register reg, Extend extend, unsigned shift_amount)
+ : reg_(reg),
+ shift_(NO_SHIFT),
+ extend_(extend),
+ shift_amount_(shift_amount) {
+ VIXL_ASSERT(reg.IsValid());
+ VIXL_ASSERT(shift_amount <= 4);
+ VIXL_ASSERT(!reg.IsSP());
+
+ // Extend modes SXTX and UXTX require a 64-bit register.
+ VIXL_ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+}
+
+
+bool Operand::IsImmediate() const {
+ return reg_.Is(NoReg);
+}
+
+
+bool Operand::IsShiftedRegister() const {
+ return reg_.IsValid() && (shift_ != NO_SHIFT);
+}
+
+
+bool Operand::IsExtendedRegister() const {
+ return reg_.IsValid() && (extend_ != NO_EXTEND);
+}
+
+
+bool Operand::IsZero() const {
+ if (IsImmediate()) {
+ return immediate() == 0;
+ } else {
+ return reg().IsZero();
+ }
+}
+
+
+Operand Operand::ToExtendedRegister() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ VIXL_ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+ return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
+}
+
+
+// MemOperand
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(!regoffset.IsSP());
+ VIXL_ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+
+ // SXTX extend mode requires a 64-bit offset register.
+ VIXL_ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+}
+
+
+MemOperand::MemOperand(Register base,
+ Register regoffset,
+ Shift shift,
+ unsigned shift_amount)
+ : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
+ shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+ VIXL_ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
+ VIXL_ASSERT(shift == LSL);
+}
+
+
+MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
+ : base_(base), regoffset_(NoReg), addrmode_(addrmode) {
+ VIXL_ASSERT(base.Is64Bits() && !base.IsZero());
+
+ if (offset.IsImmediate()) {
+ offset_ = offset.immediate();
+ } else if (offset.IsShiftedRegister()) {
+ VIXL_ASSERT((addrmode == Offset) || (addrmode == PostIndex));
+
+ regoffset_ = offset.reg();
+ shift_ = offset.shift();
+ shift_amount_ = offset.shift_amount();
+
+ extend_ = NO_EXTEND;
+ offset_ = 0;
+
+ // These assertions match those in the shifted-register constructor.
+ VIXL_ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
+ VIXL_ASSERT(shift_ == LSL);
+ } else {
+ VIXL_ASSERT(offset.IsExtendedRegister());
+ VIXL_ASSERT(addrmode == Offset);
+
+ regoffset_ = offset.reg();
+ extend_ = offset.extend();
+ shift_amount_ = offset.shift_amount();
+
+ shift_ = NO_SHIFT;
+ offset_ = 0;
+
+ // These assertions match those in the extended-register constructor.
+ VIXL_ASSERT(!regoffset_.IsSP());
+ VIXL_ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+ VIXL_ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+ }
+}
+
+
+bool MemOperand::IsImmediateOffset() const {
+ return (addrmode_ == Offset) && regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsRegisterOffset() const {
+ return (addrmode_ == Offset) && !regoffset_.Is(NoReg);
+}
+
+
+bool MemOperand::IsPreIndex() const {
+ return addrmode_ == PreIndex;
+}
+
+
+bool MemOperand::IsPostIndex() const {
+ return addrmode_ == PostIndex;
+}
+
+
+void MemOperand::AddOffset(int64_t offset) {
+ VIXL_ASSERT(IsImmediateOffset());
+ offset_ += offset;
+}
+
+
+// Assembler
+Assembler::Assembler(PositionIndependentCodeOption pic)
+ : pic_(pic),
+ cpu_features_(CPUFeatures::AArch64LegacyBaseline())
+{
+ // Mozilla change: always use maximally-present features.
+ cpu_features_.Combine(CPUFeatures::InferFromOS());
+
+ // Mozilla change: Compile time hard-coded value from js-config.mozbuild.
+#ifndef MOZ_AARCH64_JSCVT
+# error "MOZ_AARCH64_JSCVT must be defined."
+#elif MOZ_AARCH64_JSCVT >= 1
+ // Note: the vixl backend implements the JSCVT flag as a boolean, even though
+ // the ARMv8 documentation reserves 3 extra bits for forward compatibility.
+ cpu_features_.Combine(CPUFeatures::kJSCVT);
+#endif
+}
+
+
+// Code generation.
+void Assembler::br(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BR | Rn(xn));
+}
+
+
+void Assembler::blr(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(BLR | Rn(xn));
+}
+
+
+void Assembler::ret(const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ Emit(RET | Rn(xn));
+}
+
+
+void Assembler::NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op) {
+ VIXL_ASSERT(vd.Is16B() || vd.Is8B());
+ VIXL_ASSERT(vn.Is16B());
+ VIXL_ASSERT(AreSameFormat(vd, vm));
+ Emit(op | (vd.IsQ() ? NEON_Q : 0) | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBL_1v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_2v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_3v);
+}
+
+
+void Assembler::tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBL_4v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONTable(vd, vn, vm, NEON_TBX_1v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ USE(vn2);
+ VIXL_ASSERT(AreSameFormat(vn, vn2));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_2v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ USE(vn2, vn3);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_3v);
+}
+
+
+void Assembler::tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ USE(vn2, vn3, vn4);
+ VIXL_ASSERT(AreSameFormat(vn, vn2, vn3, vn4));
+ VIXL_ASSERT(vn2.code() == ((vn.code() + 1) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn3.code() == ((vn.code() + 2) % kNumberOfVRegisters));
+ VIXL_ASSERT(vn4.code() == ((vn.code() + 3) % kNumberOfVRegisters));
+
+ NEONTable(vd, vn, vm, NEON_TBX_4v);
+}
+
+
+void Assembler::add(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, ADD);
+}
+
+
+void Assembler::adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, ADD);
+}
+
+
+void Assembler::cmn(const Register& rn,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ adds(zr, rn, operand);
+}
+
+
+void Assembler::sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, LeaveFlags, SUB);
+}
+
+
+void Assembler::subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSub(rd, rn, operand, SetFlags, SUB);
+}
+
+
+void Assembler::cmp(const Register& rn, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rn);
+ subs(zr, rn, operand);
+}
+
+
+void Assembler::neg(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sub(rd, zr, operand);
+}
+
+
+void Assembler::negs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ subs(rd, zr, operand);
+}
+
+
+void Assembler::adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void Assembler::adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void Assembler::sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void Assembler::sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void Assembler::ngc(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbc(rd, zr, operand);
+}
+
+
+void Assembler::ngcs(const Register& rd, const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ sbcs(rd, zr, operand);
+}
+
+
+// Logical instructions.
+void Assembler::and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, AND);
+}
+
+
+void Assembler::bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BIC);
+}
+
+
+void Assembler::bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, BICS);
+}
+
+
+void Assembler::orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORR);
+}
+
+
+void Assembler::orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, ORN);
+}
+
+
+void Assembler::eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EOR);
+}
+
+
+void Assembler::eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Logical(rd, rn, operand, EON);
+}
+
+
+void Assembler::lslv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::lsrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::asrv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rorv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+// Bitfield operations.
+void Assembler::bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | BFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.Is64Bits() || rn.Is32Bits());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | SBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | UBFM | N |
+ ImmR(immr, rd.size()) | ImmS(imms, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
+ Emit(SF(rd) | EXTR | N | Rm(rm) | ImmS(lsb, rn.size()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSEL);
+}
+
+
+void Assembler::csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINC);
+}
+
+
+void Assembler::csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSINV);
+}
+
+
+void Assembler::csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ ConditionalSelect(rd, rn, rm, cond, CSNEG);
+}
+
+
+void Assembler::cset(const Register &rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinc(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::csetm(const Register &rd, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ Register zr = AppropriateZeroRegFor(rd);
+ csinv(rd, zr, zr, InvertCondition(cond));
+}
+
+
+void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinc(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csinv(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ csneg(rd, rn, rn, InvertCondition(cond));
+}
+
+
+void Assembler::ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMN);
+}
+
+
+void Assembler::ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ ConditionalCompare(rn, operand, nzcv, cond, CCMP);
+}
+
+
+void Assembler::DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op) {
+ Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32B | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32H | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32W | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32X | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CB | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CH | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is32Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CW | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is32Bits() && rn.Is32Bits() && rm.Is64Bits());
+ Emit(SF(rm) | Rm(rm) | CRC32CX | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::mul(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MADD);
+}
+
+
+void Assembler::madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MADD);
+}
+
+
+void Assembler::mneg(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(AreSameSizeAndType(rd, rn, rm));
+ DataProcessing3Source(rd, rn, rm, AppropriateZeroRegFor(rd), MSUB);
+}
+
+
+void Assembler::msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ DataProcessing3Source(rd, rn, rm, ra, MSUB);
+}
+
+
+void Assembler::umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
+}
+
+
+void Assembler::smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
+}
+
+
+void Assembler::umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
+}
+
+
+void Assembler::smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(rd.Is64Bits() && ra.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
+}
+
+
+void Assembler::smull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.Is64Bits());
+ VIXL_ASSERT(rn.Is32Bits() && rm.Is32Bits());
+ DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
+}
+
+
+void Assembler::sdiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::smulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, SMULH_x);
+}
+
+
+void Assembler::umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm) {
+ VIXL_ASSERT(xd.Is64Bits() && xn.Is64Bits() && xm.Is64Bits());
+ DataProcessing3Source(xd, xn, xm, xzr, UMULH_x);
+}
+
+
+void Assembler::udiv(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == rm.size());
+ Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::rbit(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, RBIT);
+}
+
+
+void Assembler::rev16(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, REV16);
+}
+
+
+void Assembler::rev32(const Register& rd,
+ const Register& rn) {
+ VIXL_ASSERT(rd.Is64Bits());
+ DataProcessing1Source(rd, rn, REV);
+}
+
+
+void Assembler::rev(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
+}
+
+
+void Assembler::clz(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLZ);
+}
+
+
+void Assembler::cls(const Register& rd,
+ const Register& rn) {
+ DataProcessing1Source(rd, rn, CLS);
+}
+
+
+void Assembler::ldp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
+}
+
+
+void Assembler::stp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
+}
+
+
+void Assembler::ldpsw(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.Is64Bits());
+ LoadStorePair(rt, rt2, src, LDPSW_x);
+}
+
+
+void Assembler::LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
+
+ int offset = static_cast<int>(addr.offset());
+ Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
+ ImmLSPair(offset, CalcLSPairDataSize(op));
+
+ Instr addrmodeop;
+ if (addr.IsImmediateOffset()) {
+ addrmodeop = LoadStorePairOffsetFixed;
+ } else {
+ VIXL_ASSERT(addr.offset() != 0);
+ if (addr.IsPreIndex()) {
+ addrmodeop = LoadStorePairPreIndexFixed;
+ } else {
+ VIXL_ASSERT(addr.IsPostIndex());
+ addrmodeop = LoadStorePairPostIndexFixed;
+ }
+ }
+ Emit(addrmodeop | memop);
+}
+
+
+void Assembler::ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ LoadStorePairNonTemporal(rt, rt2, src,
+ LoadPairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ LoadStorePairNonTemporal(rt, rt2, dst,
+ StorePairNonTemporalOpFor(rt, rt2));
+}
+
+
+void Assembler::LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op) {
+ VIXL_ASSERT(!rt.Is(rt2));
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ VIXL_ASSERT(addr.IsImmediateOffset());
+
+ unsigned size = CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(op & LoadStorePairMask));
+ VIXL_ASSERT(IsImmLSPair(addr.offset(), size));
+ int offset = static_cast<int>(addr.offset());
+ Emit(op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) | ImmLSPair(offset, size));
+}
+
+
+// Memory instructions.
+void Assembler::ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRB_w, option);
+}
+
+
+void Assembler::sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRB_w, option);
+}
+
+
+void Assembler::ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w, option);
+}
+
+
+void Assembler::ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRH_w, option);
+}
+
+
+void Assembler::sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, STRH_w, option);
+}
+
+
+void Assembler::ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w, option);
+}
+
+
+void Assembler::ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LoadOpFor(rt), option);
+}
+
+
+void Assembler::stur(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, dst, StoreOpFor(rt), option);
+}
+
+
+void Assembler::ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(rt.Is64Bits());
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ LoadStore(rt, src, LDRSW_x, option);
+}
+
+
+void Assembler::ldrsw(const Register& rt, int imm19) {
+ Emit(LDRSW_x_lit | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::ldr(const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+void Assembler::prfm(PrefetchOperation op, int imm19) {
+ Emit(PRFM_lit | ImmPrefetchOperation(op) | ImmLLiteral(imm19));
+}
+
+
+// Exclusive-access instructions.
+void Assembler::stxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXR_x : STXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXR_x : LDXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STXP_x : STXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDXP_x : LDXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlxrb(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRB_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxrh(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLXRH_w | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlxr(const Register& rs,
+ const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXR_x : STLXR_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxrb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxrh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDAXRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldaxr(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXR_x : LDAXR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLXP_x : STLXP_w;
+ Emit(op | Rs(rs) | Rt(rt) | Rt2(rt2) | RnSP(dst.base()));
+}
+
+
+void Assembler::ldaxp(const Register& rt,
+ const Register& rt2,
+ const MemOperand& src) {
+ VIXL_ASSERT(rt.size() == rt2.size());
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAXP_x : LDAXP_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2(rt2) | RnSP(src.base()));
+}
+
+
+void Assembler::stlrb(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlrh(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ Emit(STLRH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::stlr(const Register& rt,
+ const MemOperand& dst) {
+ VIXL_ASSERT(dst.IsImmediateOffset() && (dst.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? STLR_x : STLR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(dst.base()));
+}
+
+
+void Assembler::ldarb(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARB_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldarh(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ Emit(LDARH_w | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::ldar(const Register& rt,
+ const MemOperand& src) {
+ VIXL_ASSERT(src.IsImmediateOffset() && (src.offset() == 0));
+ LoadStoreExclusive op = rt.Is64Bits() ? LDAR_x : LDAR_w;
+ Emit(op | Rs_mask | Rt(rt) | Rt2_mask | RnSP(src.base()));
+}
+
+
+void Assembler::prfm(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireUnscaledOffset);
+ VIXL_ASSERT(option != PreferUnscaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::prfum(PrefetchOperation op, const MemOperand& address,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(option != RequireScaledOffset);
+ VIXL_ASSERT(option != PreferScaledOffset);
+ Prefetch(op, address, option);
+}
+
+
+void Assembler::sys(int op1, int crn, int crm, int op2, const Register& rt) {
+ Emit(SYS | ImmSysOp1(op1) | CRn(crn) | CRm(crm) | ImmSysOp2(op2) | Rt(rt));
+}
+
+
+void Assembler::sys(int op, const Register& rt) {
+ Emit(SYS | SysOp(op) | Rt(rt));
+}
+
+
+void Assembler::dc(DataCacheOp op, const Register& rt) {
+ VIXL_ASSERT((op == CVAC) || (op == CVAU) || (op == CIVAC) || (op == ZVA));
+ sys(op, rt);
+}
+
+
+void Assembler::ic(InstructionCacheOp op, const Register& rt) {
+ VIXL_ASSERT(op == IVAU);
+ sys(op, rt);
+}
+
+
+// NEON structure loads and stores.
+Instr Assembler::LoadStoreStructAddrModeField(const MemOperand& addr) {
+ Instr addr_field = RnSP(addr.base());
+
+ if (addr.IsPostIndex()) {
+ VIXL_STATIC_ASSERT(NEONLoadStoreMultiStructPostIndex ==
+ static_cast<NEONLoadStoreMultiStructPostIndexOp>(
+ NEONLoadStoreSingleStructPostIndex));
+
+ addr_field |= NEONLoadStoreMultiStructPostIndex;
+ if (addr.offset() == 0) {
+ addr_field |= RmNot31(addr.regoffset());
+ } else {
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
+ addr_field |= (0x1f << Rm_offset);
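+ // For example, the immediate form "ld1 {v0.16b, v1.16b}, [x0], #32" is
+ // encoded this way, with rm = 31; the 32-byte advance is implied by the
+ // two 16-byte registers in the list.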
+ }
+ } else {
+ VIXL_ASSERT(addr.IsImmediateOffset() && (addr.offset() == 0));
+ }
+ return addr_field;
+}
+
+void Assembler::LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op) {
+#ifdef DEBUG
+ // Assert that addressing mode is either offset (with immediate 0), post
+ // index by immediate of the size of the register list, or post index by a
+ // value in a core register.
+ if (addr.IsImmediateOffset()) {
+ VIXL_ASSERT(addr.offset() == 0);
+ } else {
+ int offset = vt.SizeInBytes();
+ switch (op) {
+ case NEON_LD1_1v:
+ case NEON_ST1_1v:
+ offset *= 1; break;
+ case NEONLoadStoreSingleStructLoad1:
+ case NEONLoadStoreSingleStructStore1:
+ case NEON_LD1R:
+ offset = (offset / vt.lanes()) * 1; break;
+
+ case NEON_LD1_2v:
+ case NEON_ST1_2v:
+ case NEON_LD2:
+ case NEON_ST2:
+ offset *= 2;
+ break;
+ case NEONLoadStoreSingleStructLoad2:
+ case NEONLoadStoreSingleStructStore2:
+ case NEON_LD2R:
+ offset = (offset / vt.lanes()) * 2; break;
+
+ case NEON_LD1_3v:
+ case NEON_ST1_3v:
+ case NEON_LD3:
+ case NEON_ST3:
+ offset *= 3; break;
+ case NEONLoadStoreSingleStructLoad3:
+ case NEONLoadStoreSingleStructStore3:
+ case NEON_LD3R:
+ offset = (offset / vt.lanes()) * 3; break;
+
+ case NEON_LD1_4v:
+ case NEON_ST1_4v:
+ case NEON_LD4:
+ case NEON_ST4:
+ offset *= 4; break;
+ case NEONLoadStoreSingleStructLoad4:
+ case NEONLoadStoreSingleStructStore4:
+ case NEON_LD4R:
+ offset = (offset / vt.lanes()) * 4; break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ VIXL_ASSERT(!addr.regoffset().Is(NoReg) ||
+ addr.offset() == offset);
+ }
+#else
+ USE(vt, addr, op);
+#endif
+}
+
+void Assembler::LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ VIXL_ASSERT(vt.IsVector() || vt.Is1D());
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+ Emit(op | LoadStoreStructAddrModeField(addr) | LSVFormat(vt) | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_LD1_1v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD1_2v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD1_3v);
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD1_4v);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_LD2);
+}
+
+
+void Assembler::ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad2);
+}
+
+
+void Assembler::ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD2R);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_LD3);
+}
+
+
+void Assembler::ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad3);
+}
+
+
+void Assembler::ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD3R);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_LD4);
+}
+
+
+void Assembler::ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad4);
+}
+
+
+void Assembler::ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD4R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStruct(vt, src, NEON_ST1_1v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, src, NEON_ST1_2v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, src, NEON_ST1_3v);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, src, NEON_ST1_4v);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStruct(vt, dst, NEON_ST2);
+}
+
+
+void Assembler::st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2);
+ VIXL_ASSERT(AreSameFormat(vt, vt2));
+ VIXL_ASSERT(AreConsecutive(vt, vt2));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore2);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStruct(vt, dst, NEON_ST3);
+}
+
+
+void Assembler::st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore3);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStruct(vt, dst, NEON_ST4);
+}
+
+
+void Assembler::st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ USE(vt2, vt3, vt4);
+ VIXL_ASSERT(AreSameFormat(vt, vt2, vt3, vt4));
+ VIXL_ASSERT(AreConsecutive(vt, vt2, vt3, vt4));
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore4);
+}
+
+
+void Assembler::LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op) {
+ LoadStoreStructVerify(vt, addr, op);
+
+ // We support vt arguments of the form vt.VxT() or vt.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ unsigned lane_size = vt.LaneSizeInBytes();
+ VIXL_ASSERT(lane < (kQRegSizeInBytes / lane_size));
+
+ // Lane size is encoded in the opcode field. Lane index is encoded in the Q,
+ // S and size fields.
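+ // For example, an S-sized lane (4 bytes) at index 3 yields a byte offset
+ // of 12 (0b1100): bits [1:0] land in the size field (00), bit 2 in S (1)
+ // and bit 3 in Q (1).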
+ lane *= lane_size;
+ if (lane_size == 8) lane++;
+
+ Instr size = (lane << NEONLSSize_offset) & NEONLSSize_mask;
+ Instr s = (lane << (NEONS_offset - 2)) & NEONS_mask;
+ Instr q = (lane << (NEONQ_offset - 3)) & NEONQ_mask;
+
+ Instr instr = op;
+ switch (lane_size) {
+ case 1: instr |= NEONLoadStoreSingle_b; break;
+ case 2: instr |= NEONLoadStoreSingle_h; break;
+ case 4: instr |= NEONLoadStoreSingle_s; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ instr |= NEONLoadStoreSingle_d;
+ }
+
+ Emit(instr | LoadStoreStructAddrModeField(addr) | q | size | s | Rt(vt));
+}
+
+
+void Assembler::ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ LoadStoreStructSingle(vt, lane, src, NEONLoadStoreSingleStructLoad1);
+}
+
+
+void Assembler::ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ LoadStoreStructSingleAllLanes(vt, src, NEON_LD1R);
+}
+
+
+void Assembler::st1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ LoadStoreStructSingle(vt, lane, dst, NEONLoadStoreSingleStructStore1);
+}
+
+
+void Assembler::NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vn, vm));
+ VIXL_ASSERT((vn.Is1H() && vd.Is1S()) ||
+ (vn.Is1S() && vd.Is1D()) ||
+ (vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+ (vn.Is16B() && vd.Is8H())||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vm.Is8B() && vd.Is8H()) ||
+ (vm.Is4H() && vd.Is4S()) ||
+ (vm.Is2S() && vd.Is2D()) ||
+              (vm.Is16B() && vd.Is8H()) ||
+ (vm.Is8H() && vd.Is4S()) ||
+ (vm.Is4S() && vd.Is2D()));
+ Emit(VFormat(vm) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop) {
+ VIXL_ASSERT(AreSameFormat(vm, vn));
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+              (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ Emit(VFormat(vd) | vop | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3DIFF_LONG_LIST(V) \
+ V(pmull, NEON_PMULL, vn.IsVector() && vn.Is8B()) \
+ V(pmull2, NEON_PMULL2, vn.IsVector() && vn.Is16B()) \
+ V(saddl, NEON_SADDL, vn.IsVector() && vn.IsD()) \
+ V(saddl2, NEON_SADDL2, vn.IsVector() && vn.IsQ()) \
+ V(sabal, NEON_SABAL, vn.IsVector() && vn.IsD()) \
+ V(sabal2, NEON_SABAL2, vn.IsVector() && vn.IsQ()) \
+ V(uabal, NEON_UABAL, vn.IsVector() && vn.IsD()) \
+ V(uabal2, NEON_UABAL2, vn.IsVector() && vn.IsQ()) \
+ V(sabdl, NEON_SABDL, vn.IsVector() && vn.IsD()) \
+ V(sabdl2, NEON_SABDL2, vn.IsVector() && vn.IsQ()) \
+ V(uabdl, NEON_UABDL, vn.IsVector() && vn.IsD()) \
+ V(uabdl2, NEON_UABDL2, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL2, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL2, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL2, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL2, vn.IsVector() && vn.IsQ()) \
+ V(ssubl, NEON_SSUBL, vn.IsVector() && vn.IsD()) \
+ V(ssubl2, NEON_SSUBL2, vn.IsVector() && vn.IsQ()) \
+ V(uaddl, NEON_UADDL, vn.IsVector() && vn.IsD()) \
+ V(uaddl2, NEON_UADDL2, vn.IsVector() && vn.IsQ()) \
+ V(usubl, NEON_USUBL, vn.IsVector() && vn.IsD()) \
+ V(usubl2, NEON_USUBL2, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlal2, NEON_SQDMLAL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmlsl, NEON_SQDMLSL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+ V(sqdmlsl2, NEON_SQDMLSL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S()) \
+ V(sqdmull, NEON_SQDMULL, vn.Is1H() || vn.Is1S() || vn.Is4H() || vn.Is2S()) \
+  V(sqdmull2, NEON_SQDMULL2, vn.Is1H() || vn.Is1S() || vn.Is8H() || vn.Is4S())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentL(vd, vn, vm, OP); \
+}
+NEON_3DIFF_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+#define NEON_3DIFF_HN_LIST(V) \
+ V(addhn, NEON_ADDHN, vd.IsD()) \
+ V(addhn2, NEON_ADDHN2, vd.IsQ()) \
+ V(raddhn, NEON_RADDHN, vd.IsD()) \
+ V(raddhn2, NEON_RADDHN2, vd.IsQ()) \
+ V(subhn, NEON_SUBHN, vd.IsD()) \
+ V(subhn2, NEON_SUBHN2, vd.IsQ()) \
+ V(rsubhn, NEON_RSUBHN, vd.IsD()) \
+ V(rsubhn2, NEON_RSUBHN2, vd.IsQ())
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3DifferentHN(vd, vn, vm, OP); \
+}
+NEON_3DIFF_HN_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+void Assembler::uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW);
+}
+
+
+void Assembler::uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_UADDW2);
+}
+
+
+void Assembler::saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW);
+}
+
+
+void Assembler::saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SADDW2);
+}
+
+
+void Assembler::usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW);
+}
+
+
+void Assembler::usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_USUBW2);
+}
+
+
+void Assembler::ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsD());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW);
+}
+
+
+void Assembler::ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(vm.IsQ());
+ NEON3DifferentW(vd, vn, vm, NEON_SSUBW2);
+}
+
+
+void Assembler::mov(const Register& rd, const Register& rm) {
+  // Moves involving the stack pointer are encoded as an add immediate with a
+  // second operand of zero. Otherwise, an orr with the zero register (zr) as
+  // the first operand is used.
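+  // For example, mov(sp, x0) assembles to "add sp, x0, #0", while
+  // mov(x1, x2) assembles to "orr x1, xzr, x2".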
+ if (rd.IsSP() || rm.IsSP()) {
+ add(rd, rm, 0);
+ } else {
+ orr(rd, AppropriateZeroRegFor(rd), rm);
+ }
+}
+
+
+void Assembler::mvn(const Register& rd, const Operand& operand) {
+ orn(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
+}
+
+
+void Assembler::msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(rt.Is64Bits());
+ Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
+}
+
+
+void Assembler::clrex(int imm4) {
+ Emit(CLREX | CRm(imm4));
+}
+
+
+void Assembler::dmb(BarrierDomain domain, BarrierType type) {
+ Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::dsb(BarrierDomain domain, BarrierType type) {
+ Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
+}
+
+
+void Assembler::isb() {
+ Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
+}
+
+
+void Assembler::fmov(const VRegister& vd, double imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1D());
+ Emit(FMOV_d_imm | Rd(vd) | ImmFP64(imm));
+ } else {
+ VIXL_ASSERT(vd.Is2D());
+ Instr op = NEONModifiedImmediate_MOVI | NEONModifiedImmediateOpBit;
+ Instr q = NEON_Q;
+ uint32_t encoded_imm = FP64ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const VRegister& vd, float imm) {
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S());
+ Emit(FMOV_s_imm | Rd(vd) | ImmFP32(imm));
+ } else {
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Instr op = NEONModifiedImmediate_MOVI;
+ Instr q = vd.Is4S() ? NEON_Q : 0;
+ uint32_t encoded_imm = FP32ToImm8(imm);
+ Emit(q | op | ImmNEONabcdefgh(encoded_imm) | NEONCmode(0xf) | Rd(vd));
+ }
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(rd.size() == vn.size());
+ FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
+ Emit(op | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.size() == rn.size());
+ FPIntegerConvertOp op = vd.Is32Bits() ? FMOV_sw : FMOV_dx;
+ Emit(op | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, const VRegister& vn) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(vd.IsSameFormat(vn));
+ Emit(FPType(vd) | FMOV | Rd(vd) | Rn(vn));
+}
+
+
+void Assembler::fmov(const VRegister& vd, int index, const Register& rn) {
+ VIXL_ASSERT((index == 1) && vd.Is1D() && rn.IsX());
+ USE(index);
+ Emit(FMOV_d1_x | Rd(vd) | Rn(rn));
+}
+
+
+void Assembler::fmov(const Register& rd, const VRegister& vn, int index) {
+ VIXL_ASSERT((index == 1) && vn.Is1D() && rd.IsX());
+ USE(index);
+ Emit(FMOV_x_d1 | Rd(rd) | Rn(vn));
+}
+
+
+void Assembler::fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMADD_s : FMADD_d);
+}
+
+
+void Assembler::fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FMSUB_s : FMSUB_d);
+}
+
+
+void Assembler::fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMADD_s : FNMADD_d);
+}
+
+
+void Assembler::fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ FPDataProcessing3Source(vd, vn, vm, va, vd.Is1S() ? FNMSUB_s : FNMSUB_d);
+}
+
+
+void Assembler::fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm));
+ Instr op = vd.Is1S() ? FNMUL_s : FNMUL_d;
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap) {
+ USE(value);
+ // Although the fcmp{e} instructions can strictly only take an immediate
+ // value of +0.0, we don't need to check for -0.0 because the sign of 0.0
+ // doesn't affect the result of the comparison.
+ VIXL_ASSERT(value == 0.0);
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ Instr op = (trap == EnableTrap) ? FCMPE_zero : FCMP_zero;
+ Emit(FPType(vn) | op | Rn(vn));
+}
+
+
+void Assembler::FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCMPE : FCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Rn(vn));
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ const VRegister& vm) {
+ FPCompareMacro(vn, vm, EnableTrap);
+}
+
+
+void Assembler::fcmp(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, DisableTrap);
+}
+
+
+void Assembler::fcmpe(const VRegister& vn,
+ double value) {
+ FPCompareMacro(vn, value, EnableTrap);
+}
+
+
+void Assembler::FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT(vn.IsSameSizeAndType(vm));
+ Instr op = (trap == EnableTrap) ? FCCMPE : FCCMP;
+ Emit(FPType(vn) | op | Rm(vm) | Cond(cond) | Rn(vn) | Nzcv(nzcv));
+}
+
+void Assembler::fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, DisableTrap);
+}
+
+
+void Assembler::fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ FPCCompareMacro(vn, vm, nzcv, cond, EnableTrap);
+}
+
+
+void Assembler::fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPType(vd) | FCSEL | Rm(vm) | Cond(cond) | Rn(vn) | Rd(vd));
+}
+
+void Assembler::fjcvtzs(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(CPUHas(CPUFeatures::kFP, CPUFeatures::kJSCVT));
+ VIXL_ASSERT(rd.IsW() && vn.Is1D());
+ Emit(FJCVTZS | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op) {
+ Emit(SF(rd) | FPType(vn) | op | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is1S()) || (vd.Is1D() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ }
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvt(const VRegister& vd,
+ const VRegister& vn) {
+ FPDataProcessing1SourceOp op;
+ if (vd.Is1D()) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1H());
+ op = vn.Is1S() ? FCVT_ds : FCVT_dh;
+ } else if (vd.Is1S()) {
+ VIXL_ASSERT(vn.Is1D() || vn.Is1H());
+ op = vn.Is1D() ? FCVT_sd : FCVT_sh;
+ } else {
+ VIXL_ASSERT(vd.Is1H());
+ VIXL_ASSERT(vn.Is1D() || vn.Is1S());
+ op = vn.Is1D() ? FCVT_hd : FCVT_hs;
+ }
+ FPDataProcessing1Source(vd, vn, op);
+}
+
+
+void Assembler::fcvtl(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H()) ||
+ (vd.Is2D() && vn.Is2S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtl2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is8H()) ||
+ (vd.Is2D() && vn.Is4S()));
+ Instr format = vd.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is4H()) ||
+ (vn.Is2D() && vd.Is2S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vn.Is4S() && vd.Is8H()) ||
+ (vn.Is2D() && vd.Is4S()));
+ Instr format = vn.Is2D() ? (1 << NEONSize_offset) : 0;
+ Emit(NEON_Q | format | NEON_FCVTN | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcvtxn(const VRegister& vd,
+ const VRegister& vn) {
+ Instr format = 1 << NEONSize_offset;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() && vn.Is1D());
+ Emit(format | NEON_FCVTXN_scalar | Rn(vn) | Rd(vd));
+ } else {
+ VIXL_ASSERT(vd.Is2S() && vn.Is2D());
+ Emit(format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+ }
+}
+
+
+void Assembler::fcvtxn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.Is4S() && vn.Is2D());
+ Instr format = 1 << NEONSize_offset;
+ Emit(NEON_Q | format | NEON_FCVTXN | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_FCVT_LIST(V) \
+ V(fcvtnu, NEON_FCVTNU, FCVTNU) \
+ V(fcvtns, NEON_FCVTNS, FCVTNS) \
+ V(fcvtpu, NEON_FCVTPU, FCVTPU) \
+ V(fcvtps, NEON_FCVTPS, FCVTPS) \
+ V(fcvtmu, NEON_FCVTMU, FCVTMU) \
+ V(fcvtms, NEON_FCVTMS, FCVTMS) \
+ V(fcvtau, NEON_FCVTAU, FCVTAU) \
+ V(fcvtas, NEON_FCVTAS, FCVTAS)
+
+#define DEFINE_ASM_FUNCS(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const Register& rd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(rd, vn, SCA_OP); \
+} \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ NEONFPConvertToInt(vd, vn, VEC_OP); \
+}
+NEON_FP2REGMISC_FCVT_LIST(DEFINE_ASM_FUNCS)
+#undef DEFINE_ASM_FUNCS
+
+
+void Assembler::fcvtzs(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZS | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZS_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZS);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZS_imm);
+ }
+}
+
+
+void Assembler::fcvtzu(const Register& rd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(vn.Is1S() || vn.Is1D());
+ VIXL_ASSERT((fbits >= 0) && (fbits <= rd.SizeInBits()));
+ if (fbits == 0) {
+ Emit(SF(rd) | FPType(vn) | FCVTZU | Rn(vn) | Rd(rd));
+ } else {
+ Emit(SF(rd) | FPType(vn) | FCVTZU_fixed | FPScale(64 - fbits) | Rn(vn) |
+ Rd(rd));
+ }
+}
+
+
+void Assembler::fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_FCVTZU);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_FCVTZU_imm);
+ }
+}
+
+void Assembler::ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_UCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_UCVTF_imm);
+ }
+}
+
+void Assembler::scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits) {
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ NEONFP2RegMisc(vd, vn, NEON_SCVTF);
+ } else {
+ VIXL_ASSERT(vd.Is1D() || vd.Is1S() || vd.Is2D() || vd.Is2S() || vd.Is4S());
+ NEONShiftRightImmediate(vd, vn, fbits, NEON_SCVTF_imm);
+ }
+}
+
+
+void Assembler::scvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | SCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::ucvtf(const VRegister& vd,
+ const Register& rn,
+ int fbits) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(fbits >= 0);
+ if (fbits == 0) {
+ Emit(SF(rn) | FPType(vd) | UCVTF | Rn(rn) | Rd(vd));
+ } else {
+ Emit(SF(rn) | FPType(vd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
+ Rd(vd));
+ }
+}
+
+
+void Assembler::NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.IsVector() || !vd.IsQ());
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ Emit(FPFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_FP2REGMISC_LIST(V) \
+ V(fabs, NEON_FABS, FABS) \
+ V(fneg, NEON_FNEG, FNEG) \
+ V(fsqrt, NEON_FSQRT, FSQRT) \
+ V(frintn, NEON_FRINTN, FRINTN) \
+ V(frinta, NEON_FRINTA, FRINTA) \
+ V(frintp, NEON_FRINTP, FRINTP) \
+ V(frintm, NEON_FRINTM, FRINTM) \
+ V(frintx, NEON_FRINTX, FRINTX) \
+ V(frintz, NEON_FRINTZ, FRINTZ) \
+ V(frinti, NEON_FRINTI, FRINTI) \
+ V(frsqrte, NEON_FRSQRTE, NEON_FRSQRTE_scalar) \
+  V(frecpe, NEON_FRECPE, NEON_FRECPE_scalar)
+
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ Instr op; \
+ if (vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP2RegMisc(vd, vn, op); \
+}
+NEON_FP2REGMISC_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0);
+ USE(value);
+
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ format = VFormat(vd);
+ }
+
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMEQ_zero, value);
+}
+
+
+void Assembler::cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGE_zero, value);
+}
+
+
+void Assembler::cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMGT_zero, value);
+}
+
+
+void Assembler::cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLE_zero, value);
+}
+
+
+void Assembler::cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_CMLT_zero, value);
+}
+
+
+void Assembler::shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT((vd.Is8H() && vn.Is8B() && shift == 8) ||
+ (vd.Is4S() && vn.Is4H() && shift == 16) ||
+ (vd.Is2D() && vn.Is2S() && shift == 32));
+ USE(shift);
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ USE(shift);
+ VIXL_ASSERT((vd.Is8H() && vn.Is16B() && shift == 8) ||
+ (vd.Is4S() && vn.Is8H() && shift == 16) ||
+ (vd.Is2D() && vn.Is4S() && shift == 32));
+ Emit(VFormat(vn) | NEON_SHLL | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(value == 0.0);
+ USE(value);
+
+ Instr op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ op |= NEON_Q | NEONScalar;
+ } else {
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S());
+ }
+
+ Emit(FPFormat(vd) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMEQ_zero, value);
+}
+
+
+void Assembler::fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGE_zero, value);
+}
+
+
+void Assembler::fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMGT_zero, value);
+}
+
+
+void Assembler::fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLE_zero, value);
+}
+
+
+void Assembler::fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double value) {
+ NEONFP2RegMisc(vd, vn, NEON_FCMLT_zero, value);
+}
+
+
+void Assembler::frecpx(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar());
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ Emit(FPFormat(vd) | NEON_FRECPX_scalar | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_3SAME_LIST(V) \
+ V(add, NEON_ADD, vd.IsVector() || vd.Is1D()) \
+ V(addp, NEON_ADDP, vd.IsVector() || vd.Is1D()) \
+ V(sub, NEON_SUB, vd.IsVector() || vd.Is1D()) \
+ V(cmeq, NEON_CMEQ, vd.IsVector() || vd.Is1D()) \
+ V(cmge, NEON_CMGE, vd.IsVector() || vd.Is1D()) \
+ V(cmgt, NEON_CMGT, vd.IsVector() || vd.Is1D()) \
+ V(cmhi, NEON_CMHI, vd.IsVector() || vd.Is1D()) \
+ V(cmhs, NEON_CMHS, vd.IsVector() || vd.Is1D()) \
+ V(cmtst, NEON_CMTST, vd.IsVector() || vd.Is1D()) \
+ V(sshl, NEON_SSHL, vd.IsVector() || vd.Is1D()) \
+ V(ushl, NEON_USHL, vd.IsVector() || vd.Is1D()) \
+ V(srshl, NEON_SRSHL, vd.IsVector() || vd.Is1D()) \
+ V(urshl, NEON_URSHL, vd.IsVector() || vd.Is1D()) \
+ V(sqdmulh, NEON_SQDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(sqrdmulh, NEON_SQRDMULH, vd.IsLaneSizeH() || vd.IsLaneSizeS()) \
+ V(shadd, NEON_SHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhadd, NEON_UHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(srhadd, NEON_SRHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(urhadd, NEON_URHADD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(shsub, NEON_SHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uhsub, NEON_UHSUB, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smax, NEON_SMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smaxp, NEON_SMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(smin, NEON_SMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sminp, NEON_SMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umax, NEON_UMAX, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umaxp, NEON_UMAXP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(umin, NEON_UMIN, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uminp, NEON_UMINP, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(saba, NEON_SABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(sabd, NEON_SABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uaba, NEON_UABA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(uabd, NEON_UABD, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mla, NEON_MLA, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mls, NEON_MLS, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(mul, NEON_MUL, vd.IsVector() && !vd.IsLaneSizeD()) \
+ V(and_, NEON_AND, vd.Is8B() || vd.Is16B()) \
+ V(orr, NEON_ORR, vd.Is8B() || vd.Is16B()) \
+ V(orn, NEON_ORN, vd.Is8B() || vd.Is16B()) \
+ V(eor, NEON_EOR, vd.Is8B() || vd.Is16B()) \
+ V(bic, NEON_BIC, vd.Is8B() || vd.Is16B()) \
+ V(bit, NEON_BIT, vd.Is8B() || vd.Is16B()) \
+ V(bif, NEON_BIF, vd.Is8B() || vd.Is16B()) \
+ V(bsl, NEON_BSL, vd.Is8B() || vd.Is16B()) \
+ V(pmul, NEON_PMUL, vd.Is8B() || vd.Is16B()) \
+ V(uqadd, NEON_UQADD, true) \
+ V(sqadd, NEON_SQADD, true) \
+ V(uqsub, NEON_UQSUB, true) \
+ V(sqsub, NEON_SQSUB, true) \
+ V(sqshl, NEON_SQSHL, true) \
+ V(uqshl, NEON_UQSHL, true) \
+ V(sqrshl, NEON_SQRSHL, true) \
+ V(uqrshl, NEON_UQRSHL, true)
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ VIXL_ASSERT(AS); \
+ NEON3Same(vd, vn, vm, OP); \
+}
+NEON_3SAME_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FP3SAME_OP_LIST(V) \
+ V(fadd, NEON_FADD, FADD) \
+ V(fsub, NEON_FSUB, FSUB) \
+ V(fmul, NEON_FMUL, FMUL) \
+ V(fdiv, NEON_FDIV, FDIV) \
+ V(fmax, NEON_FMAX, FMAX) \
+ V(fmaxnm, NEON_FMAXNM, FMAXNM) \
+ V(fmin, NEON_FMIN, FMIN) \
+ V(fminnm, NEON_FMINNM, FMINNM) \
+ V(fmulx, NEON_FMULX, NEON_FMULX_scalar) \
+ V(frecps, NEON_FRECPS, NEON_FRECPS_scalar) \
+ V(frsqrts, NEON_FRSQRTS, NEON_FRSQRTS_scalar) \
+ V(fabd, NEON_FABD, NEON_FABD_scalar) \
+ V(fmla, NEON_FMLA, 0) \
+ V(fmls, NEON_FMLS, 0) \
+ V(facge, NEON_FACGE, NEON_FACGE_scalar) \
+ V(facgt, NEON_FACGT, NEON_FACGT_scalar) \
+ V(fcmeq, NEON_FCMEQ, NEON_FCMEQ_scalar) \
+ V(fcmge, NEON_FCMGE, NEON_FCMGE_scalar) \
+ V(fcmgt, NEON_FCMGT, NEON_FCMGT_scalar) \
+ V(faddp, NEON_FADDP, 0) \
+ V(fmaxp, NEON_FMAXP, 0) \
+ V(fminp, NEON_FMINP, 0) \
+ V(fmaxnmp, NEON_FMAXNMP, 0) \
+ V(fminnmp, NEON_FMINNMP, 0)
+
+#define DEFINE_ASM_FUNC(FN, VEC_OP, SCA_OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ Instr op; \
+ if ((SCA_OP != 0) && vd.IsScalar()) { \
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D()); \
+ op = SCA_OP; \
+ } else { \
+ VIXL_ASSERT(vd.IsVector()); \
+ VIXL_ASSERT(vd.Is2S() || vd.Is2D() || vd.Is4S()); \
+ op = VEC_OP; \
+ } \
+ NEONFP3Same(vd, vn, vm, op); \
+}
+NEON_FP3SAME_OP_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::addp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1D() && vn.Is2D()));
+ Emit(SFormat(vd) | NEON_ADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::faddp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FADDP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fmaxnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMAXNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::fminnmp(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT((vd.Is1S() && vn.Is2S()) ||
+ (vd.Is1D() && vn.Is2D()));
+ Emit(FPFormat(vd) | NEON_FMINNMP_scalar | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_ORR);
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ orr(vd.V8B(), vn.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ orr(vd.V16B(), vn.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift) {
+ NEONModifiedImmShiftLsl(vd, imm8, left_shift,
+ NEONModifiedImmediate_BIC);
+}
+
+
+void Assembler::movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (vd.Is2D() || vd.Is1D()) {
+ VIXL_ASSERT(shift_amount == 0);
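+    // Every byte of the 64-bit immediate must be 0x00 or 0xff; bit i of the
+    // encoded imm8 is set when byte i is 0xff. For example,
+    // 0xff00ff00ff00ff00 encodes as imm8 0b10101010 (0xaa).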
+ int imm8 = 0;
+ for (int i = 0; i < 8; ++i) {
+ int byte = (imm >> (i * 8)) & 0xff;
+ VIXL_ASSERT((byte == 0) || (byte == 0xff));
+ if (byte == 0xff) {
+ imm8 |= (1 << i);
+ }
+ }
+ int q = vd.Is2D() ? NEON_Q : 0;
+ Emit(q | NEONModImmOp(1) | NEONModifiedImmediate_MOVI |
+ ImmNEONabcdefgh(imm8) | NEONCmode(0xe) | Rd(vd));
+ } else if (shift == LSL) {
+ VIXL_ASSERT(IsUint8(imm));
+ NEONModifiedImmShiftLsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ } else {
+ VIXL_ASSERT(IsUint8(imm));
+ NEONModifiedImmShiftMsl(vd, static_cast<int>(imm), shift_amount,
+ NEONModifiedImmediate_MOVI);
+ }
+}
+
+
+void Assembler::mvn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ if (vd.IsD()) {
+ not_(vd.V8B(), vn.V8B());
+ } else {
+ VIXL_ASSERT(vd.IsQ());
+ not_(vd.V16B(), vn.V16B());
+ }
+}
+
+
+void Assembler::mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift,
+ const int shift_amount) {
+ VIXL_ASSERT((shift == LSL) || (shift == MSL));
+ if (shift == LSL) {
+ NEONModifiedImmShiftLsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ } else {
+ NEONModifiedImmShiftMsl(vd, imm8, shift_amount,
+ NEONModifiedImmediate_MVNI);
+ }
+}
+
+
+void Assembler::NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()) ||
+ (vd.Is2D() && vm.Is1D()) ||
+ (vd.Is1D() && vm.Is1D()));
+ VIXL_ASSERT((vm.Is1S() && (vm_index < 4)) ||
+ (vm.Is1D() && (vm_index < 2)));
+
+ Instr op = vop;
+ int index_num_bits = vm.Is1S() ? 2 : 1;
+ if (vd.IsScalar()) {
+ op |= NEON_Q | NEONScalar;
+ }
+
+ Emit(FPFormat(vd) | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT((vd.Is4H() && vm.Is1H()) ||
+ (vd.Is8H() && vm.Is1H()) ||
+ (vd.Is1H() && vm.Is1H()) ||
+ (vd.Is2S() && vm.Is1S()) ||
+ (vd.Is4S() && vm.Is1S()) ||
+ (vd.Is1S() && vm.Is1S()));
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp vop) {
+ VIXL_ASSERT((vd.Is4S() && vn.Is4H() && vm.Is1H()) ||
+ (vd.Is4S() && vn.Is8H() && vm.Is1H()) ||
+ (vd.Is1S() && vn.Is1H() && vm.Is1H()) ||
+ (vd.Is2D() && vn.Is2S() && vm.Is1S()) ||
+ (vd.Is2D() && vn.Is4S() && vm.Is1S()) ||
+ (vd.Is1D() && vn.Is1S() && vm.Is1S()));
+
+ VIXL_ASSERT((vm.Is1H() && (vm.code() < 16) && (vm_index < 8)) ||
+ (vm.Is1S() && (vm_index < 4)));
+
+ Instr format, op = vop;
+ int index_num_bits = vm.Is1H() ? 3 : 2;
+ if (vd.IsScalar()) {
+ op |= NEONScalar | NEON_Q;
+ format = SFormat(vn);
+ } else {
+ format = VFormat(vn);
+ }
+ Emit(format | op | ImmNEONHLM(vm_index, index_num_bits) |
+ Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+#define NEON_BYELEMENT_LIST(V) \
+ V(mul, NEON_MUL_byelement, vn.IsVector()) \
+ V(mla, NEON_MLA_byelement, vn.IsVector()) \
+ V(mls, NEON_MLS_byelement, vn.IsVector()) \
+ V(sqdmulh, NEON_SQDMULH_byelement, true) \
+ V(sqrdmulh, NEON_SQRDMULH_byelement, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_FPBYELEMENT_LIST(V) \
+ V(fmul, NEON_FMUL_byelement) \
+ V(fmla, NEON_FMLA_byelement) \
+ V(fmls, NEON_FMLS_byelement) \
+ V(fmulx, NEON_FMULX_byelement)
+
+
+#define DEFINE_ASM_FUNC(FN, OP) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ NEONFPByElement(vd, vn, vm, vm_index, OP); \
+}
+NEON_FPBYELEMENT_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+#define NEON_BYELEMENT_LONG_LIST(V) \
+ V(sqdmull, NEON_SQDMULL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmull2, NEON_SQDMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlal, NEON_SQDMLAL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlal2, NEON_SQDMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(sqdmlsl, NEON_SQDMLSL_byelement, vn.IsScalar() || vn.IsD()) \
+ V(sqdmlsl2, NEON_SQDMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smull, NEON_SMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smull2, NEON_SMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umull, NEON_UMULL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umull2, NEON_UMULL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlal, NEON_SMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlal2, NEON_SMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlal, NEON_UMLAL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlal2, NEON_UMLAL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(smlsl, NEON_SMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(smlsl2, NEON_SMLSL_byelement, vn.IsVector() && vn.IsQ()) \
+ V(umlsl, NEON_UMLSL_byelement, vn.IsVector() && vn.IsD()) \
+ V(umlsl2, NEON_UMLSL_byelement, vn.IsVector() && vn.IsQ())
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index) { \
+ VIXL_ASSERT(AS); \
+ NEONByElementL(vd, vn, vm, vm_index, OP); \
+}
+NEON_BYELEMENT_LONG_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::suqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SUQADD);
+}
+
+
+void Assembler::usqadd(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_USQADD);
+}
+
+
+void Assembler::abs(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_ABS);
+}
+
+
+void Assembler::sqabs(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQABS);
+}
+
+
+void Assembler::neg(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEON2RegMisc(vd, vn, NEON_NEG);
+}
+
+
+void Assembler::sqneg(const VRegister& vd,
+ const VRegister& vn) {
+ NEON2RegMisc(vd, vn, NEON_SQNEG);
+}
+
+
+void Assembler::NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop) {
+ Instr format, op = vop;
+ if (vd.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ op |= NEON_Q | NEONScalar;
+ format = SFormat(vd);
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+ (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ format = VFormat(vd);
+ }
+ Emit(format | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::xtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsD());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::xtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_XTN);
+}
+
+
+void Assembler::sqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTN);
+}
+
+
+void Assembler::sqxtun(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::sqxtun2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_SQXTUN);
+}
+
+
+void Assembler::uqxtn(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsScalar() || vd.IsD());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+void Assembler::uqxtn2(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(vd.IsVector() && vd.IsQ());
+ NEONXtn(vd, vn, NEON_UQXTN);
+}
+
+
+// NEON NOT and RBIT are distinguished by bit 22, the bottom bit of "size".
+void Assembler::not_(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vd) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rbit(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | (1 << NEONSize_offset) | NEON_RBIT_NOT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ VIXL_ASSERT((0 <= index) && (index < vd.lanes()));
+ Emit(VFormat(vd) | NEON_EXT | Rm(vm) | ImmNEONExt(index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ Instr q, scalar;
+
+ // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
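+  // For example, dup(v0.V4S(), v1.V4S(), 2) broadcasts lane 2 of v1 to all
+  // four lanes of v0, while a scalar destination (dup(s0, v1.V4S(), 2))
+  // extracts that lane instead.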
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
+
+ if (vd.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT(!vd.Is1D());
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | scalar | NEON_DUP_ELEMENT |
+ ImmNEON5(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(vn.IsScalar());
+ dup(vd, vn, vn_index);
+}
+
+
+void Assembler::dup(const VRegister& vd, const Register& rn) {
+ VIXL_ASSERT(!vd.Is1D());
+ VIXL_ASSERT(vd.Is2D() == rn.IsX());
+ int q = vd.IsD() ? 0 : NEON_Q;
+ Emit(q | NEON_DUP_GENERAL | ImmNEON5(VFormat(vd), 0) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ case 4: format = NEON_4S; break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_ELEMENT | ImmNEON5(format, vd_index) |
+ ImmNEON4(format, vn_index) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ ins(vd, vd_index, vn, vn_index);
+}
+
+
+void Assembler::ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ // We support vd arguments of the form vd.VxT() or vd.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vd.LaneSizeInBytes();
+ NEONFormatField format;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rn.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rn.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rn.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rn.IsX());
+ format = NEON_2D;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vd_index) &&
+ (vd_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(NEON_INS_GENERAL | ImmNEON5(format, vd_index) | Rn(rn) | Rd(vd));
+}
+
+
+void Assembler::mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ ins(vd, vd_index, rn);
+}
+
+
+void Assembler::umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s or d.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ switch (lane_size) {
+ case 1: format = NEON_16B; VIXL_ASSERT(rd.IsW()); break;
+ case 2: format = NEON_8H; VIXL_ASSERT(rd.IsW()); break;
+ case 4: format = NEON_4S; VIXL_ASSERT(rd.IsW()); break;
+ default:
+ VIXL_ASSERT(lane_size == 8);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_2D;
+ q = NEON_Q;
+ break;
+ }
+
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_UMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ VIXL_ASSERT(vn.SizeInBytes() >= 4);
+ umov(rd, vn, vn_index);
+}
+
+
+void Assembler::smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+  // We support vn arguments of the form vn.VxT() or vn.T(), where x is the
+ // number of lanes, and T is b, h, s.
+ int lane_size = vn.LaneSizeInBytes();
+ NEONFormatField format;
+ Instr q = 0;
+ VIXL_ASSERT(lane_size != 8);
+ switch (lane_size) {
+ case 1: format = NEON_16B; break;
+ case 2: format = NEON_8H; break;
+ default:
+ VIXL_ASSERT(lane_size == 4);
+ VIXL_ASSERT(rd.IsX());
+ format = NEON_4S;
+ break;
+ }
+ q = rd.IsW() ? 0 : NEON_Q;
+ VIXL_ASSERT((0 <= vn_index) &&
+ (vn_index < LaneCountFromFormat(static_cast<VectorFormat>(format))));
+ Emit(q | NEON_SMOV | ImmNEON5(format, vn_index) | Rn(vn) | Rd(rd));
+}
+
+
+void Assembler::cls(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLS | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::clz(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_CLZ | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::cnt(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_CNT | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev16(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B());
+ Emit(VFormat(vn) | NEON_REV16 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev32(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H());
+ Emit(VFormat(vn) | NEON_REV32 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::rev64(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(!vd.Is1D() && !vd.Is2D());
+ Emit(VFormat(vn) | NEON_REV64 | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::ursqrte(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URSQRTE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::urecpe(const VRegister& vd,
+ const VRegister& vn) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ Emit(VFormat(vn) | NEON_URECPE | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op) {
+ VIXL_ASSERT((op == NEON_SADDLP) ||
+ (op == NEON_UADDLP) ||
+ (op == NEON_SADALP) ||
+ (op == NEON_UADALP));
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is4H()) ||
+ (vn.Is4H() && vd.Is2S()) ||
+ (vn.Is2S() && vd.Is1D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADDLP);
+}
+
+
+void Assembler::uaddlp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADDLP);
+}
+
+
+void Assembler::sadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_SADALP);
+}
+
+
+void Assembler::uadalp(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAddlp(vd, vn, NEON_UADALP);
+}
+
+
+void Assembler::NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1H()) ||
+ (vn.Is16B() && vd.Is1H()) ||
+ (vn.Is4H() && vd.Is1S()) ||
+ (vn.Is8H() && vd.Is1S()) ||
+ (vn.Is4S() && vd.Is1D()));
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::saddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_SADDLV);
+}
+
+
+void Assembler::uaddlv(const VRegister& vd,
+ const VRegister& vn) {
+ NEONAcrossLanesL(vd, vn, NEON_UADDLV);
+}
+
+
+void Assembler::NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op) {
+ VIXL_ASSERT((vn.Is8B() && vd.Is1B()) ||
+ (vn.Is16B() && vd.Is1B()) ||
+ (vn.Is4H() && vd.Is1H()) ||
+ (vn.Is8H() && vd.Is1H()) ||
+ (vn.Is4S() && vd.Is1S()));
+ if ((op & NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ Emit(FPFormat(vn) | op | Rn(vn) | Rd(vd));
+ } else {
+ Emit(VFormat(vn) | op | Rn(vn) | Rd(vd));
+ }
+}
+
+
+#define NEON_ACROSSLANES_LIST(V) \
+ V(fmaxv, NEON_FMAXV, vd.Is1S()) \
+ V(fminv, NEON_FMINV, vd.Is1S()) \
+ V(fmaxnmv, NEON_FMAXNMV, vd.Is1S()) \
+ V(fminnmv, NEON_FMINNMV, vd.Is1S()) \
+ V(addv, NEON_ADDV, true) \
+ V(smaxv, NEON_SMAXV, true) \
+ V(sminv, NEON_SMINV, true) \
+ V(umaxv, NEON_UMAXV, true) \
+ V(uminv, NEON_UMINV, true)
+
+
+#define DEFINE_ASM_FUNC(FN, OP, AS) \
+void Assembler::FN(const VRegister& vd, \
+ const VRegister& vn) { \
+ VIXL_ASSERT(AS); \
+ NEONAcrossLanes(vd, vn, OP); \
+}
+NEON_ACROSSLANES_LIST(DEFINE_ASM_FUNC)
+#undef DEFINE_ASM_FUNC
+
+
+void Assembler::NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op) {
+ VIXL_ASSERT(AreSameFormat(vd, vn, vm));
+ VIXL_ASSERT(!vd.Is1D());
+ Emit(VFormat(vd) | op | Rm(vm) | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN1);
+}
+
+
+void Assembler::trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_TRN2);
+}
+
+
+void Assembler::uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP1);
+}
+
+
+void Assembler::uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_UZP2);
+}
+
+
+void Assembler::zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP1);
+}
+
+
+void Assembler::zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ NEONPerm(vd, vn, vm, NEON_ZIP2);
+}
+
+
+void Assembler::NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb) {
+ VIXL_ASSERT(AreSameFormat(vd, vn));
+ Instr q, scalar;
+ if (vn.IsScalar()) {
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ q = vd.IsD() ? 0 : NEON_Q;
+ scalar = 0;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
+ NEONShiftImmediate(vd, vn, op, (laneSizeInBits + shift) << 16);
+}
+
+
+void Assembler::NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
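+  // For example, sshr(v0.V4S(), v1.V4S(), 3) stores (2 * 32) - 3 = 61
+  // (0b0111101) in immh:immb; the position of the leading set bit selects the
+  // 32-bit lane size and the remaining bits recover the shift amount.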
+ NEONShiftImmediate(vd, vn, op, ((2 * laneSizeInBits) - shift) << 16);
+}
+
+
+void Assembler::NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ int laneSizeInBits = vn.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 0) && (shift < laneSizeInBits));
+ int immh_immb = (laneSizeInBits + shift) << 16;
+
+ VIXL_ASSERT((vn.Is8B() && vd.Is8H()) ||
+ (vn.Is4H() && vd.Is4S()) ||
+ (vn.Is2S() && vd.Is2D()) ||
+              (vn.Is16B() && vd.Is8H()) ||
+ (vn.Is8H() && vd.Is4S()) ||
+ (vn.Is4S() && vd.Is2D()));
+ Instr q;
+ q = vn.IsD() ? 0 : NEON_Q;
+ Emit(q | op | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op) {
+ Instr q, scalar;
+ int laneSizeInBits = vd.LaneSizeInBits();
+ VIXL_ASSERT((shift >= 1) && (shift <= laneSizeInBits));
+ int immh_immb = (2 * laneSizeInBits - shift) << 16;
+
+ if (vn.IsScalar()) {
+ VIXL_ASSERT((vd.Is1B() && vn.Is1H()) ||
+ (vd.Is1H() && vn.Is1S()) ||
+ (vd.Is1S() && vn.Is1D()));
+ q = NEON_Q;
+ scalar = NEONScalar;
+ } else {
+ VIXL_ASSERT((vd.Is8B() && vn.Is8H()) ||
+ (vd.Is4H() && vn.Is4S()) ||
+ (vd.Is2S() && vn.Is2D()) ||
+                (vd.Is16B() && vn.Is8H()) ||
+ (vd.Is8H() && vn.Is4S()) ||
+ (vd.Is4S() && vn.Is2D()));
+ scalar = 0;
+ q = vd.IsD() ? 0 : NEON_Q;
+ }
+ Emit(q | op | scalar | immh_immb | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SHL);
+}
+
+
+void Assembler::sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SLI);
+}
+
+
+void Assembler::sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHL_imm);
+}
+
+
+void Assembler::sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_SQSHLU);
+}
+
+
+void Assembler::uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ NEONShiftLeftImmediate(vd, vn, shift, NEON_UQSHL_imm);
+}
+
+
+void Assembler::sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_SSHLL);
+}
+
+
+void Assembler::sxtl(const VRegister& vd,
+ const VRegister& vn) {
+ sshll(vd, vn, 0);
+}
+
+
+void Assembler::sxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ sshll2(vd, vn, 0);
+}
+
+
+void Assembler::ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsD());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsQ());
+ NEONShiftImmediateL(vd, vn, shift, NEON_USHLL);
+}
+
+
+void Assembler::uxtl(const VRegister& vd,
+ const VRegister& vn) {
+ ushll(vd, vn, 0);
+}
+
+
+void Assembler::uxtl2(const VRegister& vd,
+ const VRegister& vn) {
+ ushll2(vd, vn, 0);
+}
+
+
+void Assembler::sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRI);
+}
+
+
+void Assembler::sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSHR);
+}
+
+
+void Assembler::ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USHR);
+}
+
+
+void Assembler::srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSHR);
+}
+
+
+void Assembler::urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSHR);
+}
+
+
+void Assembler::ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SSRA);
+}
+
+
+void Assembler::usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_USRA);
+}
+
+
+void Assembler::srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_SRSRA);
+}
+
+
+void Assembler::ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsVector() || vd.Is1D());
+ NEONShiftRightImmediate(vd, vn, shift, NEON_URSRA);
+}
+
+
+void Assembler::shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SHRN);
+}
+
+
+void Assembler::rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsD());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_RSHRN);
+}
+
+
+void Assembler::sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRN);
+}
+
+
+void Assembler::sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRN);
+}
+
+
+void Assembler::sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQSHRUN);
+}
+
+
+void Assembler::sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_SQRSHRUN);
+}
+
+
+void Assembler::uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQSHRN);
+}
+
+
+void Assembler::uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vd.IsD() || (vn.IsScalar() && vd.IsScalar()));
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+void Assembler::uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift) {
+ VIXL_ASSERT(vn.IsVector() && vd.IsQ());
+ NEONShiftImmediateN(vd, vn, shift, NEON_UQRSHRN);
+}
+
+
+// Note:
+// Below, a difference in case for the same letter indicates a
+// negated bit.
+// If b is 1, then B is 0.
+uint32_t Assembler::FP32ToImm8(float imm) {
+ VIXL_ASSERT(IsImmFP32(imm));
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
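+  // For example, 1.0f has raw bits 0x3f800000: a = 0, B = 0 (so b = 1) and
+  // cdefgh = 110000, which encodes as imm8 0x70.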
+ uint32_t bits = FloatToRawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+
+ return bit7 | bit6 | bit5_to_0;
+}
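+
+// For example, 1.0f has raw bits 0x3f800000: a = 0, bit 29 (one of the b
+// bits) is 1, and bits[24:19] = 0b110000, so imm8 = 0b01110000 = 0x70, which
+// is the immediate that "fmov s0, #1.0" encodes.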
+
+
+Instr Assembler::ImmFP32(float imm) {
+ return FP32ToImm8(imm) << ImmFP_offset;
+}
+
+
+uint32_t Assembler::FP64ToImm8(double imm) {
+ VIXL_ASSERT(IsImmFP64(imm));
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bit7: a000.0000
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+}
+
+
+Instr Assembler::ImmFP64(double imm) {
+ return FP64ToImm8(imm) << ImmFP_offset;
+}
+
+
+// Code generation helpers.
+void Assembler::MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op) {
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are zero (a positive 32-bit number) or top
+ // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
+ VIXL_ASSERT(((imm >> kWRegSize) == 0) ||
+ ((imm >> (kWRegSize - 1)) == 0x1ffffffff));
+ imm &= kWRegMask;
+ }
+
+ if (shift >= 0) {
+ // Explicit shift specified.
+ VIXL_ASSERT((shift == 0) || (shift == 16) ||
+ (shift == 32) || (shift == 48));
+ VIXL_ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+ shift /= 16;
+ } else {
+ // Calculate a new immediate and shift combination to encode the immediate
+ // argument.
+ shift = 0;
+ if ((imm & 0xffffffffffff0000) == 0) {
+ // Nothing to do.
+ } else if ((imm & 0xffffffff0000ffff) == 0) {
+ imm >>= 16;
+ shift = 1;
+ } else if ((imm & 0xffff0000ffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 32;
+ shift = 2;
+ } else if ((imm & 0x0000ffffffffffff) == 0) {
+ VIXL_ASSERT(rd.Is64Bits());
+ imm >>= 48;
+ shift = 3;
+ }
+ }
+
+ VIXL_ASSERT(IsUint16(imm));
+
+ Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
+ Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
+}
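+
+// For example, given imm = 0x12340000 with no explicit shift, the second case
+// above matches, so the value is encoded as imm = 0x1234 with shift = 1,
+// i.e. "movz x0, #0x1234, lsl #16" when mov_op is MOVZ and rd is x0.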
+
+
+void Assembler::AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmAddSub(immediate));
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
+ ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
+ } else if (operand.IsShiftedRegister()) {
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+
+ // For instructions of the form:
+ // add/sub wsp, <Wn>, <Wm> [, LSL #0-3 ]
+ // add/sub <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // add/sub wsp, wsp, <Wm> [, LSL #0-3 ]
+ // adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
+ // or their 64-bit register equivalents, convert the operand from shifted to
+ // extended register mode, and emit an add/sub extended instruction.
+ if (rn.IsSP() || rd.IsSP()) {
+ VIXL_ASSERT(!(rd.IsSP() && (S == SetFlags)));
+ DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
+ AddSubExtendedFixed | op);
+ } else {
+ DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
+ }
+ } else {
+ VIXL_ASSERT(operand.IsExtendedRegister());
+ DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
+ }
+}
+
+
+void Assembler::AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ VIXL_ASSERT(rd.size() == operand.reg().size());
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::hlt(int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(HLT | ImmException(code));
+}
+
+
+void Assembler::brk(int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(BRK | ImmException(code));
+}
+
+
+void Assembler::svc(int code) {
+ Emit(SVC | ImmException(code));
+}
+
+
+void Assembler::ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ Instr ccmpop;
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ VIXL_ASSERT(IsImmConditionalCompare(immediate));
+ ccmpop = ConditionalCompareImmediateFixed | op |
+ ImmCondCmp(static_cast<unsigned>(immediate));
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+ ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
+ }
+ Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
+}
+
+
+void Assembler::DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ Emit(SF(rn) | op | Rn(rn) | Rd(rd));
+}
+
+
+void Assembler::FPDataProcessing1Source(const VRegister& vd,
+ const VRegister& vn,
+ FPDataProcessing1SourceOp op) {
+ VIXL_ASSERT(vd.Is1H() || vd.Is1S() || vd.Is1D());
+ Emit(FPType(vn) | op | Rn(vn) | Rd(vd));
+}
+
+
+void Assembler::FPDataProcessing3Source(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va,
+ FPDataProcessing3SourceOp op) {
+ VIXL_ASSERT(vd.Is1S() || vd.Is1D());
+ VIXL_ASSERT(AreSameSizeAndType(vd, vn, vm, va));
+ Emit(FPType(vd) | op | Rm(vm) | Rn(vn) | Rd(vd) | Ra(va));
+}
+
+
+void Assembler::NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is8B() || vd.Is16B() || vd.Is4H() || vd.Is8H() ||
+ vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8) ||
+ (left_shift == 16) || (left_shift == 24));
+ VIXL_ASSERT(IsUint8(imm8));
+
+ int cmode_1, cmode_2, cmode_3;
+ if (vd.Is8B() || vd.Is16B()) {
+ VIXL_ASSERT(op == NEONModifiedImmediate_MOVI);
+ cmode_1 = 1;
+ cmode_2 = 1;
+ cmode_3 = 1;
+ } else {
+ cmode_1 = (left_shift >> 3) & 1;
+ cmode_2 = left_shift >> 4;
+ cmode_3 = 0;
+ if (vd.Is4H() || vd.Is8H()) {
+ VIXL_ASSERT((left_shift == 0) || (left_shift == 8));
+ cmode_3 = 1;
+ }
+ }
+ int cmode = (cmode_3 << 3) | (cmode_2 << 2) | (cmode_1 << 1);
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
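+
+// For example, "movi v0.4s, #0x42, lsl #16" reaches this function with a 4S
+// destination and left_shift = 16, so cmode_1 = 0, cmode_2 = 1, cmode_3 = 0
+// and cmode = 0b0100, the 32-bit "shifted by 16" variant of the modified
+// immediate encoding.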
+
+
+void Assembler::NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op) {
+ VIXL_ASSERT(vd.Is2S() || vd.Is4S());
+ VIXL_ASSERT((shift_amount == 8) || (shift_amount == 16));
+ VIXL_ASSERT(IsUint8(imm8));
+
+ int cmode_0 = (shift_amount >> 4) & 1;
+ int cmode = 0xc | cmode_0;
+
+ int q = vd.IsQ() ? NEON_Q : 0;
+
+ Emit(q | op | ImmNEONabcdefgh(imm8) | NEONCmode(cmode) | Rd(vd));
+}
+
+
+void Assembler::EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned shift_amount) {
+ switch (shift) {
+ case LSL:
+ lsl(rd, rn, shift_amount);
+ break;
+ case LSR:
+ lsr(rd, rn, shift_amount);
+ break;
+ case ASR:
+ asr(rd, rn, shift_amount);
+ break;
+ case ROR:
+ ror(rd, rn, shift_amount);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Assembler::EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift) {
+ VIXL_ASSERT(rd.size() >= rn.size());
+ unsigned reg_size = rd.size();
+ // Use the correct size of register.
+ Register rn_ = Register(rn.code(), rd.size());
+ // Bits extracted are high_bit:0.
+ unsigned high_bit = (8 << (extend & 0x3)) - 1;
+ // Number of bits left in the result that are not introduced by the shift.
+ unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
+
+ if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
+ switch (extend) {
+ case UXTB:
+ case UXTH:
+ case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
+ case SXTB:
+ case SXTH:
+ case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
+ case UXTX:
+ case SXTX: {
+ VIXL_ASSERT(rn.size() == kXRegSize);
+ // Nothing to extend. Just shift.
+ lsl(rd, rn_, left_shift);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ } else {
+ // No need to extend as the extended bits would be shifted away.
+ lsl(rd, rn_, left_shift);
+ }
+}
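+
+// For example, extending "w1, UXTB #2" into a W destination gives
+// high_bit = 7 and non_shift_bits = 30, so ubfm(rd, w1, 30, 7) is emitted,
+// i.e. "ubfiz rd, w1, #2, #8": the low byte is zero-extended and shifted left
+// by two in a single instruction.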
+
+
+void Assembler::DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op) {
+ Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
+ Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
+ ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
+ dest_reg | RnSP(rn));
+}
+
+
+Instr Assembler::LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option) {
+ Instr base = RnSP(addr.base());
+ int64_t offset = addr.offset();
+
+ if (addr.IsImmediateOffset()) {
+ bool prefer_unscaled = (option == PreferUnscaledOffset) ||
+ (option == RequireUnscaledOffset);
+ if (prefer_unscaled && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+
+ if ((option != RequireUnscaledOffset) &&
+ IsImmLSScaled(offset, access_size)) {
+ // Use the scaled addressing mode.
+ return base | LoadStoreUnsignedOffsetFixed |
+ ImmLSUnsigned(static_cast<int>(offset) >> access_size);
+ }
+
+ if ((option != RequireScaledOffset) && IsImmLSUnscaled(offset)) {
+ // Use the unscaled addressing mode.
+ return base | LoadStoreUnscaledOffsetFixed |
+ ImmLS(static_cast<int>(offset));
+ }
+ }
+
+ // All remaining addressing modes are register-offset, pre-indexed or
+ // post-indexed modes.
+ VIXL_ASSERT((option != RequireUnscaledOffset) &&
+ (option != RequireScaledOffset));
+
+ if (addr.IsRegisterOffset()) {
+ Extend ext = addr.extend();
+ Shift shift = addr.shift();
+ unsigned shift_amount = addr.shift_amount();
+
+ // LSL is encoded in the option field as UXTX.
+ if (shift == LSL) {
+ ext = UXTX;
+ }
+
+ // Shifts are encoded in one bit, indicating a left shift by the memory
+ // access size.
+ VIXL_ASSERT((shift_amount == 0) || (shift_amount == access_size));
+ return base | LoadStoreRegisterOffsetFixed | Rm(addr.regoffset()) |
+ ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0);
+ }
+
+ if (addr.IsPreIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePreIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ if (addr.IsPostIndex() && IsImmLSUnscaled(offset)) {
+ return base | LoadStorePostIndexFixed | ImmLS(static_cast<int>(offset));
+ }
+
+ // If this point is reached, the MemOperand (addr) cannot be encoded.
+ VIXL_UNREACHABLE();
+ return 0;
+}
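+
+// For example, for an 8-byte access (access_size = 3) an immediate offset of
+// 264 is a multiple of 8 whose scaled value (33) fits in 12 bits, so the
+// scaled unsigned-offset form is chosen; an offset of 255 is not 8-aligned
+// but fits in the signed 9-bit field, so the unscaled (ldur/stur style) form
+// is chosen instead.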
+
+
+void Assembler::LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option) {
+ Emit(op | Rt(rt) | LoadStoreMemOperand(addr, CalcLSDataSize(op), option));
+}
+
+
+void Assembler::Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option) {
+ VIXL_ASSERT(addr.IsRegisterOffset() || addr.IsImmediateOffset());
+
+ Instr prfop = ImmPrefetchOperation(op);
+ Emit(PRFM | prfop | LoadStoreMemOperand(addr, kXRegSizeInBytesLog2, option));
+}
+
+
+bool Assembler::IsImmAddSub(int64_t immediate) {
+ return IsUint12(immediate) ||
+ (IsUint12(immediate >> 12) && ((immediate & 0xfff) == 0));
+}
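+
+// For example, 0x123 (a 12-bit value) and 0x123000 (a 12-bit value shifted
+// left by 12) are valid add/sub immediates, whereas 0x123456 is not, because
+// its low 12 bits are non-zero and it does not fit in 12 bits unshifted.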
+
+
+bool Assembler::IsImmConditionalCompare(int64_t immediate) {
+ return IsUint5(immediate);
+}
+
+
+bool Assembler::IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+
+ return true;
+}
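+
+// For example, 1.0f (0x3f800000) satisfies all three checks, whereas 0.1f
+// (0x3dcccccd) fails the first one because its low 19 bits are non-zero, so
+// it cannot be encoded as an FP immediate and must be materialised some
+// other way.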
+
+
+bool Assembler::IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0x0000ffffffffffff) != 0) {
+ return false;
+ }
+
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+ return false;
+ }
+
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+ return false;
+ }
+
+ return true;
+}
+
+
+bool Assembler::IsImmLSPair(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && IsInt7(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSScaled(int64_t offset, unsigned access_size) {
+ VIXL_ASSERT(access_size <= kQRegSizeInBytesLog2);
+ bool offset_is_size_multiple =
+ (((offset >> access_size) << access_size) == offset);
+ return offset_is_size_multiple && IsUint12(offset >> access_size);
+}
+
+
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
+ return IsInt9(offset);
+}
+
+
+// The movn instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits set, e.g. 0xffff1234, 0xffff1234ffffffff.
+bool Assembler::IsImmMovn(uint64_t imm, unsigned reg_size) {
+ return IsImmMovz(~imm, reg_size);
+}
+
+
+// The movz instruction can generate immediates containing an arbitrary 16-bit
+// value, with the remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
+bool Assembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+ return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
+}
+
+
+// Test if a given value can be encoded in the immediate field of a logical
+// instruction.
+// If it can be encoded, the function returns true, and values pointed to by n,
+// imm_s and imm_r are updated with immediates encoded in the format required
+// by the corresponding fields in the logical instruction.
+// If it can not be encoded, the function returns false, and the values pointed
+// to by n, imm_s and imm_r are undefined.
+bool Assembler::IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n,
+ unsigned* imm_s,
+ unsigned* imm_r) {
+ VIXL_ASSERT((width == kWRegSize) || (width == kXRegSize));
+
+ bool negate = false;
+
+ // Logical immediates are encoded using parameters n, imm_s and imm_r using
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1 bits
+ // are set. The pattern is rotated right by R, and repeated across a 32 or
+ // 64-bit value, depending on destination register width.
+ //
+ // Put another way: the basic format of a logical immediate is a single
+ // contiguous stretch of 1 bits, repeated across the whole word at intervals
+ // given by a power of 2. To identify them quickly, we first locate the
+ // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+ // is different for every logical immediate, so it gives us all the
+ // information we need to identify the only logical immediate that our input
+ // could be, and then we simply check if that's the value we actually have.
+ //
+ // (The rotation parameter does give the possibility of the stretch of 1 bits
+ // going 'round the end' of the word. To deal with that, we observe that in
+ // any situation where that happens the bitwise NOT of the value is also a
+ // valid logical immediate. So we simply invert the input whenever its low bit
+ // is set, and then we know that the rotated case can't arise.)
+
+ if (value & 1) {
+ // If the low bit is 1, negate the value, and set a flag to remember that we
+ // did (so that we can adjust the return values appropriately).
+ negate = true;
+ value = ~value;
+ }
+
+ if (width == kWRegSize) {
+ // To handle 32-bit logical immediates, the very easiest thing is to repeat
+ // the input value twice to make a 64-bit word. The correct encoding of that
+ // as a logical immediate will also be the correct encoding of the 32-bit
+ // value.
+
+ // Avoid making the assumption that the most-significant 32 bits are zero by
+ // shifting the value left and duplicating it.
+ value <<= kWRegSize;
+ value |= value >> kWRegSize;
+ }
+
+ // The basic analysis idea: imagine our input word looks like this.
+ //
+ // 0011111000111110001111100011111000111110001111100011111000111110
+ // c b a
+ // |<--d-->|
+ //
+ // We find the lowest set bit (as an actual power-of-2 value, not its index)
+ // and call it a. Then we add a to our original number, which wipes out the
+ // bottommost stretch of set bits and replaces it with a 1 carried into the
+ // next zero bit. Then we look for the new lowest set bit, which is in
+ // position b, and subtract it, so now our number is just like the original
+ // but with the lowest stretch of set bits completely gone. Now we find the
+ // lowest set bit again, which is position c in the diagram above. Then we'll
+ // measure the distance d between bit positions a and c (using CLZ), and that
+ // tells us that the only valid logical immediate that could possibly be equal
+ // to this number is the one in which a stretch of bits running from a to just
+ // below b is replicated every d bits.
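+ //
+ // As a concrete trace, take value = 0xff00ff00ff00ff00 (the low bit is
+ // clear, so no inversion happens): a = 1 << 8, b = 1 << 16 and c = 1 << 24,
+ // so d = 16, mask = 0xffff and b - a = 0xff00. The candidate computed below,
+ // 0xff00 * 0x0001000100010001, reproduces the input, and the resulting
+ // encoding is N = 0, imms = 0b100111 (eight set bits in a 16-bit element)
+ // and immr = 8.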
+ uint64_t a = LowestSetBit(value);
+ uint64_t value_plus_a = value + a;
+ uint64_t b = LowestSetBit(value_plus_a);
+ uint64_t value_plus_a_minus_b = value_plus_a - b;
+ uint64_t c = LowestSetBit(value_plus_a_minus_b);
+
+ int d, clz_a, out_n;
+ uint64_t mask;
+
+ if (c != 0) {
+ // The general case, in which there is more than one stretch of set bits.
+ // Compute the repeat distance d, and set up a bitmask covering the basic
+ // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+ // of these cases the N bit of the output will be zero.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ int clz_c = CountLeadingZeros(c, kXRegSize);
+ d = clz_a - clz_c;
+ mask = ((UINT64_C(1) << d) - 1);
+ out_n = 0;
+ } else {
+ // Handle degenerate cases.
+ //
+ // If any of those 'find lowest set bit' operations didn't find a set bit at
+ // all, then the word will have been zero thereafter, so in particular the
+ // last lowest_set_bit operation will have returned zero. So we can test for
+ // all the special case conditions in one go by seeing if c is zero.
+ if (a == 0) {
+ // The input was zero (or all 1 bits, which will come to here too after we
+ // inverted it at the start of the function), for which we just return
+ // false.
+ return false;
+ } else {
+ // Otherwise, if c was zero but a was not, then there's just one stretch
+ // of set bits in our word, meaning that we have the trivial case of
+ // d == 64 and only one 'repetition'. Set up all the same variables as in
+ // the general case above, and set the N bit in the output.
+ clz_a = CountLeadingZeros(a, kXRegSize);
+ d = 64;
+ mask = ~UINT64_C(0);
+ out_n = 1;
+ }
+ }
+
+ // If the repeat period d is not a power of two, it can't be encoded.
+ if (!IsPowerOf2(d)) {
+ return false;
+ }
+
+ if (((b - a) & ~mask) != 0) {
+ // If the bit stretch (b - a) does not fit within the mask derived from the
+ // repeat period, then fail.
+ return false;
+ }
+
+ // The only possible option is b - a repeated every d bits. Now we're going to
+ // actually construct the valid logical immediate derived from that
+ // specification, and see if it equals our original input.
+ //
+ // To repeat a value every d bits, we multiply it by a number of the form
+ // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+ // be derived using a table lookup on CLZ(d).
+ static const uint64_t multipliers[] = {
+ 0x0000000000000001UL,
+ 0x0000000100000001UL,
+ 0x0001000100010001UL,
+ 0x0101010101010101UL,
+ 0x1111111111111111UL,
+ 0x5555555555555555UL,
+ };
+ uint64_t multiplier = multipliers[CountLeadingZeros(d, kXRegSize) - 57];
+ uint64_t candidate = (b - a) * multiplier;
+
+ if (value != candidate) {
+ // The candidate pattern doesn't match our input value, so fail.
+ return false;
+ }
+
+ // We have a match! This is a valid logical immediate, so now we have to
+ // construct the bits and pieces of the instruction encoding that generates
+ // it.
+
+ // Count the set bits in our basic stretch. The special case of clz(0) == -1
+ // makes the answer come out right for stretches that reach the very top of
+ // the word (e.g. numbers like 0xffffc00000000000).
+ int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSize);
+ int s = clz_a - clz_b;
+
+ // Decide how many bits to rotate right by, to put the low bit of that basic
+ // stretch in position a.
+ int r;
+ if (negate) {
+ // If we inverted the input right at the start of this function, here's
+ // where we compensate: the number of set bits becomes the number of clear
+ // bits, and the rotation count is based on position b rather than position
+ // a (since b is the location of the 'lowest' 1 bit after inversion).
+ s = d - s;
+ r = (clz_b + 1) & (d - 1);
+ } else {
+ r = (clz_a + 1) & (d - 1);
+ }
+
+ // Now we're done, except for having to encode the S output in such a way that
+ // it gives both the number of set bits and the length of the repeated
+ // segment. The s field is encoded like this:
+ //
+ // imms size S
+ // ssssss 64 UInt(ssssss)
+ // 0sssss 32 UInt(sssss)
+ // 10ssss 16 UInt(ssss)
+ // 110sss 8 UInt(sss)
+ // 1110ss 4 UInt(ss)
+ // 11110s 2 UInt(s)
+ //
+ // So we 'or' (-d << 1) with our computed s to form imms.
+ if ((n != NULL) || (imm_s != NULL) || (imm_r != NULL)) {
+ *n = out_n;
+ *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+ *imm_r = r;
+ }
+
+ return true;
+}
+
+
+LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? LDR_x : LDR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return LDR_b;
+ case kHRegSize: return LDR_h;
+ case kSRegSize: return LDR_s;
+ case kDRegSize: return LDR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q;
+ }
+ }
+}
+
+
+LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
+ VIXL_ASSERT(rt.IsValid());
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STR_x : STR_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBits()) {
+ case kBRegSize: return STR_b;
+ case kHRegSize: return STR_h;
+ case kSRegSize: return STR_s;
+ case kDRegSize: return STR_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STR_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STP_x : STP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STP_s;
+ case kDRegSizeInBytes: return STP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STP_q;
+ }
+ }
+}
+
+
+LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2) {
+ VIXL_ASSERT((STP_w | LoadStorePairLBit) == LDP_w);
+ return static_cast<LoadStorePairOp>(StorePairOpFor(rt, rt2) |
+ LoadStorePairLBit);
+}
+
+
+LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ if (rt.IsRegister()) {
+ return rt.Is64Bits() ? STNP_x : STNP_w;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return STNP_s;
+ case kDRegSizeInBytes: return STNP_d;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return STNP_q;
+ }
+ }
+}
+
+
+LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2) {
+ VIXL_ASSERT((STNP_w | LoadStorePairNonTemporalLBit) == LDNP_w);
+ return static_cast<LoadStorePairNonTemporalOp>(
+ StorePairNonTemporalOpFor(rt, rt2) | LoadStorePairNonTemporalLBit);
+}
+
+
+LoadLiteralOp Assembler::LoadLiteralOpFor(const CPURegister& rt) {
+ if (rt.IsRegister()) {
+ return rt.IsX() ? LDR_x_lit : LDR_w_lit;
+ } else {
+ VIXL_ASSERT(rt.IsVRegister());
+ switch (rt.SizeInBytes()) {
+ case kSRegSizeInBytes: return LDR_s_lit;
+ case kDRegSizeInBytes: return LDR_d_lit;
+ default:
+ VIXL_ASSERT(rt.IsQ());
+ return LDR_q_lit;
+ }
+ }
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt) const {
+ // Core registers are available without any particular CPU features.
+ if (rt.IsRegister()) return true;
+ VIXL_ASSERT(rt.IsVRegister());
+ // The architecture does not allow FP and NEON to be implemented separately,
+ // but we can crudely categorise them based on register size, since FP only
+ // uses D, S and (occasionally) H registers.
+ if (rt.IsH() || rt.IsS() || rt.IsD()) {
+ return CPUHas(CPUFeatures::kFP) || CPUHas(CPUFeatures::kNEON);
+ }
+ VIXL_ASSERT(rt.IsB() || rt.IsQ());
+ return CPUHas(CPUFeatures::kNEON);
+}
+
+
+bool Assembler::CPUHas(const CPURegister& rt, const CPURegister& rt2) const {
+ // This is currently only used for loads and stores, where rt and rt2 must
+ // have the same size and type. We could extend this to cover other cases if
+ // necessary, but for now we can avoid checking both registers.
+ VIXL_ASSERT(AreSameSizeAndType(rt, rt2));
+ USE(rt2);
+ return CPUHas(rt);
+}
+
+
+bool Assembler::CPUHas(SystemRegister sysreg) const {
+ switch (sysreg) {
+ case RNDR:
+ case RNDRRS:
+ return CPUHas(CPUFeatures::kRNG);
+ case FPCR:
+ case NZCV:
+ break;
+ }
+ return true;
+}
+
+
+bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ int number_of_valid_regs = 0;
+ int number_of_valid_fpregs = 0;
+
+ RegList unique_regs = 0;
+ RegList unique_fpregs = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
+
+ for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+ if (regs[i].IsRegister()) {
+ number_of_valid_regs++;
+ unique_regs |= regs[i].Bit();
+ } else if (regs[i].IsVRegister()) {
+ number_of_valid_fpregs++;
+ unique_fpregs |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(!regs[i].IsValid());
+ }
+ }
+
+ int number_of_unique_regs = CountSetBits(unique_regs);
+ int number_of_unique_fpregs = CountSetBits(unique_fpregs);
+
+ VIXL_ASSERT(number_of_valid_regs >= number_of_unique_regs);
+ VIXL_ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+
+ return (number_of_valid_regs != number_of_unique_regs) ||
+ (number_of_valid_fpregs != number_of_unique_fpregs);
+}
+
+
+bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
+ const CPURegister& reg3, const CPURegister& reg4,
+ const CPURegister& reg5, const CPURegister& reg6,
+ const CPURegister& reg7, const CPURegister& reg8) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
+ match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
+ match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
+ match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
+ match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
+ return match;
+}
+
+
+bool AreSameFormat(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() || reg2.IsSameFormat(reg1);
+ match &= !reg3.IsValid() || reg3.IsSameFormat(reg1);
+ match &= !reg4.IsValid() || reg4.IsSameFormat(reg1);
+ return match;
+}
+
+
+bool AreConsecutive(const VRegister& reg1, const VRegister& reg2,
+ const VRegister& reg3, const VRegister& reg4) {
+ VIXL_ASSERT(reg1.IsValid());
+ bool match = true;
+ match &= !reg2.IsValid() ||
+ (reg2.code() == ((reg1.code() + 1) % kNumberOfVRegisters));
+ match &= !reg3.IsValid() ||
+ (reg3.code() == ((reg1.code() + 2) % kNumberOfVRegisters));
+ match &= !reg4.IsValid() ||
+ (reg4.code() == ((reg1.code() + 3) % kNumberOfVRegisters));
+ return match;
+}
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Assembler-vixl.h b/js/src/jit/arm64/vixl/Assembler-vixl.h
new file mode 100644
index 0000000000..3ae415f977
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Assembler-vixl.h
@@ -0,0 +1,4327 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_ASSEMBLER_A64_H_
+#define VIXL_A64_ASSEMBLER_A64_H_
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/MozBaseAssembler-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include "jit/JitSpewer.h"
+
+#include "jit/shared/Assembler-shared.h"
+#include "jit/shared/Disassembler-shared.h"
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h"
+
+#if defined(_M_ARM64)
+#ifdef mvn
+#undef mvn
+#endif
+#endif
+
+namespace vixl {
+
+using js::jit::BufferOffset;
+using js::jit::Label;
+using js::jit::Address;
+using js::jit::BaseIndex;
+using js::jit::DisassemblerSpew;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+
+typedef uint64_t RegList;
+static const int kRegListSizeInBits = sizeof(RegList) * 8;
+
+
+// Registers.
+
+// Some CPURegister methods can return Register or VRegister types, so we need
+// to declare them in advance.
+class Register;
+class VRegister;
+
+class CPURegister {
+ public:
+ enum RegisterType {
+ // The kInvalid value is used to detect uninitialized static instances,
+ // which are always zero-initialized before any constructors are called.
+ kInvalid = 0,
+ kRegister,
+ kVRegister,
+ kFPRegister = kVRegister,
+ kNoRegister
+ };
+
+ constexpr CPURegister() : code_(0), size_(0), type_(kNoRegister) {
+ }
+
+ constexpr CPURegister(unsigned code, unsigned size, RegisterType type)
+ : code_(code), size_(size), type_(type) {
+ }
+
+ unsigned code() const {
+ VIXL_ASSERT(IsValid());
+ return code_;
+ }
+
+ RegisterType type() const {
+ VIXL_ASSERT(IsValidOrNone());
+ return type_;
+ }
+
+ RegList Bit() const {
+ VIXL_ASSERT(code_ < (sizeof(RegList) * 8));
+ return IsValid() ? (static_cast<RegList>(1) << code_) : 0;
+ }
+
+ unsigned size() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ int SizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(size() % 8 == 0);
+ return size_ / 8;
+ }
+
+ int SizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ bool Is8Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 8;
+ }
+
+ bool Is16Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 16;
+ }
+
+ bool Is32Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 32;
+ }
+
+ bool Is64Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 64;
+ }
+
+ bool Is128Bits() const {
+ VIXL_ASSERT(IsValid());
+ return size_ == 128;
+ }
+
+ bool IsValid() const {
+ if (IsValidRegister() || IsValidVRegister()) {
+ VIXL_ASSERT(!IsNone());
+ return true;
+ } else {
+ // This assert is hit when the register has not been properly initialized.
+ // One cause for this can be an initialisation order fiasco. See
+ // https://isocpp.org/wiki/faq/ctors#static-init-order for some details.
+ VIXL_ASSERT(IsNone());
+ return false;
+ }
+ }
+
+ bool IsValidRegister() const {
+ return IsRegister() &&
+ ((size_ == kWRegSize) || (size_ == kXRegSize)) &&
+ ((code_ < kNumberOfRegisters) || (code_ == kSPRegInternalCode));
+ }
+
+ bool IsValidVRegister() const {
+ return IsVRegister() &&
+ ((size_ == kBRegSize) || (size_ == kHRegSize) ||
+ (size_ == kSRegSize) || (size_ == kDRegSize) ||
+ (size_ == kQRegSize)) &&
+ (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsValidFPRegister() const {
+ return IsFPRegister() && (code_ < kNumberOfVRegisters);
+ }
+
+ bool IsNone() const {
+ // kNoRegister types should always have size 0 and code 0.
+ VIXL_ASSERT((type_ != kNoRegister) || (code_ == 0));
+ VIXL_ASSERT((type_ != kNoRegister) || (size_ == 0));
+
+ return type_ == kNoRegister;
+ }
+
+ bool Aliases(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return (code_ == other.code_) && (type_ == other.type_);
+ }
+
+ bool Is(const CPURegister& other) const {
+ VIXL_ASSERT(IsValidOrNone() && other.IsValidOrNone());
+ return Aliases(other) && (size_ == other.size_);
+ }
+
+ bool IsZero() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kZeroRegCode);
+ }
+
+ bool IsSP() const {
+ VIXL_ASSERT(IsValid());
+ return IsRegister() && (code_ == kSPRegInternalCode);
+ }
+
+ bool IsRegister() const {
+ return type_ == kRegister;
+ }
+
+ bool IsVRegister() const {
+ return type_ == kVRegister;
+ }
+
+ bool IsFPRegister() const {
+ return IsS() || IsD();
+ }
+
+ bool IsW() const { return IsValidRegister() && Is32Bits(); }
+ bool IsX() const { return IsValidRegister() && Is64Bits(); }
+
+ // These assertions ensure that the size and type of the register are as
+ // described. They do not consider the number of lanes that make up a vector.
+ // So, for example, Is8B() implies IsD(), and Is1D() implies IsD(), but IsD()
+ // does not imply Is1D() or Is8B().
+ // Check the number of lanes, i.e. the format of the vector, using methods such
+ // as Is8B(), Is1D(), etc. in the VRegister class.
+ bool IsV() const { return IsVRegister(); }
+ bool IsB() const { return IsV() && Is8Bits(); }
+ bool IsH() const { return IsV() && Is16Bits(); }
+ bool IsS() const { return IsV() && Is32Bits(); }
+ bool IsD() const { return IsV() && Is64Bits(); }
+ bool IsQ() const { return IsV() && Is128Bits(); }
+
+ const Register& W() const;
+ const Register& X() const;
+ const VRegister& V() const;
+ const VRegister& B() const;
+ const VRegister& H() const;
+ const VRegister& S() const;
+ const VRegister& D() const;
+ const VRegister& Q() const;
+
+ bool IsSameSizeAndType(const CPURegister& other) const {
+ return (size_ == other.size_) && (type_ == other.type_);
+ }
+
+ protected:
+ unsigned code_;
+ unsigned size_;
+ RegisterType type_;
+
+ private:
+ bool IsValidOrNone() const {
+ return IsValid() || IsNone();
+ }
+};
+
+
+class Register : public CPURegister {
+ public:
+ Register() : CPURegister() {}
+ explicit Register(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()) {
+ VIXL_ASSERT(IsValidRegister());
+ }
+ constexpr Register(unsigned code, unsigned size)
+ : CPURegister(code, size, kRegister) {}
+
+ constexpr Register(js::jit::Register r, unsigned size)
+ : CPURegister(r.code(), size, kRegister) {}
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsRegister() || IsNone());
+ return IsValidRegister();
+ }
+
+ js::jit::Register asUnsized() const {
+ // asUnsized() is only ever used on temp registers or on registers that
+ // are known not to be SP, and there should be no risk of it being
+ // applied to SP. Check anyway.
+ VIXL_ASSERT(code_ != kSPRegInternalCode);
+ return js::jit::Register::FromCode((js::jit::Register::Code)code_);
+ }
+
+
+ static const Register& WRegFromCode(unsigned code);
+ static const Register& XRegFromCode(unsigned code);
+
+ private:
+ static const Register wregisters[];
+ static const Register xregisters[];
+};
+
+
+class VRegister : public CPURegister {
+ public:
+ VRegister() : CPURegister(), lanes_(1) {}
+ explicit VRegister(const CPURegister& other)
+ : CPURegister(other.code(), other.size(), other.type()), lanes_(1) {
+ VIXL_ASSERT(IsValidVRegister());
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(unsigned code, unsigned size, unsigned lanes = 1)
+ : CPURegister(code, size, kVRegister), lanes_(lanes) {
+ // VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+ constexpr VRegister(js::jit::FloatRegister r)
+ : CPURegister(r.encoding(), r.size() * 8, kVRegister), lanes_(1) {
+ }
+ constexpr VRegister(js::jit::FloatRegister r, unsigned size)
+ : CPURegister(r.encoding(), size, kVRegister), lanes_(1) {
+ }
+ VRegister(unsigned code, VectorFormat format)
+ : CPURegister(code, RegisterSizeInBitsFromFormat(format), kVRegister),
+ lanes_(IsVectorFormat(format) ? LaneCountFromFormat(format) : 1) {
+ VIXL_ASSERT(IsPowerOf2(lanes_) && (lanes_ <= 16));
+ }
+
+ bool IsValid() const {
+ VIXL_ASSERT(IsVRegister() || IsNone());
+ return IsValidVRegister();
+ }
+
+ static const VRegister& BRegFromCode(unsigned code);
+ static const VRegister& HRegFromCode(unsigned code);
+ static const VRegister& SRegFromCode(unsigned code);
+ static const VRegister& DRegFromCode(unsigned code);
+ static const VRegister& QRegFromCode(unsigned code);
+ static const VRegister& VRegFromCode(unsigned code);
+
+ VRegister V8B() const { return VRegister(code_, kDRegSize, 8); }
+ VRegister V16B() const { return VRegister(code_, kQRegSize, 16); }
+ VRegister V4H() const { return VRegister(code_, kDRegSize, 4); }
+ VRegister V8H() const { return VRegister(code_, kQRegSize, 8); }
+ VRegister V2S() const { return VRegister(code_, kDRegSize, 2); }
+ VRegister V4S() const { return VRegister(code_, kQRegSize, 4); }
+ VRegister V2D() const { return VRegister(code_, kQRegSize, 2); }
+ VRegister V1D() const { return VRegister(code_, kDRegSize, 1); }
+
+ bool Is8B() const { return (Is64Bits() && (lanes_ == 8)); }
+ bool Is16B() const { return (Is128Bits() && (lanes_ == 16)); }
+ bool Is4H() const { return (Is64Bits() && (lanes_ == 4)); }
+ bool Is8H() const { return (Is128Bits() && (lanes_ == 8)); }
+ bool Is2S() const { return (Is64Bits() && (lanes_ == 2)); }
+ bool Is4S() const { return (Is128Bits() && (lanes_ == 4)); }
+ bool Is1D() const { return (Is64Bits() && (lanes_ == 1)); }
+ bool Is2D() const { return (Is128Bits() && (lanes_ == 2)); }
+
+ // For consistency, we assert the number of lanes of these scalar registers,
+ // even though there are no vectors of equivalent total size with which they
+ // could alias.
+ bool Is1B() const {
+ VIXL_ASSERT(!(Is8Bits() && IsVector()));
+ return Is8Bits();
+ }
+ bool Is1H() const {
+ VIXL_ASSERT(!(Is16Bits() && IsVector()));
+ return Is16Bits();
+ }
+ bool Is1S() const {
+ VIXL_ASSERT(!(Is32Bits() && IsVector()));
+ return Is32Bits();
+ }
+
+ bool IsLaneSizeB() const { return LaneSizeInBits() == kBRegSize; }
+ bool IsLaneSizeH() const { return LaneSizeInBits() == kHRegSize; }
+ bool IsLaneSizeS() const { return LaneSizeInBits() == kSRegSize; }
+ bool IsLaneSizeD() const { return LaneSizeInBits() == kDRegSize; }
+
+ int lanes() const {
+ return lanes_;
+ }
+
+ bool IsScalar() const {
+ return lanes_ == 1;
+ }
+
+ bool IsVector() const {
+ return lanes_ > 1;
+ }
+
+ bool IsSameFormat(const VRegister& other) const {
+ return (size_ == other.size_) && (lanes_ == other.lanes_);
+ }
+
+ unsigned LaneSizeInBytes() const {
+ return SizeInBytes() / lanes_;
+ }
+
+ unsigned LaneSizeInBits() const {
+ return LaneSizeInBytes() * 8;
+ }
+
+ private:
+ static const VRegister bregisters[];
+ static const VRegister hregisters[];
+ static const VRegister sregisters[];
+ static const VRegister dregisters[];
+ static const VRegister qregisters[];
+ static const VRegister vregisters[];
+ int lanes_;
+};
+
+
+// Backward compatibility for FPRegisters.
+typedef VRegister FPRegister;
+
+// No*Reg is used to indicate an unused argument, or an error case. Note that
+// these all compare equal (using the Is() method). The Register and VRegister
+// variants are provided for convenience.
+const Register NoReg;
+const VRegister NoVReg;
+const FPRegister NoFPReg; // For backward compatibility.
+const CPURegister NoCPUReg;
+
+
+#define DEFINE_REGISTERS(N) \
+constexpr Register w##N(N, kWRegSize); \
+constexpr Register x##N(N, kXRegSize);
+REGISTER_CODE_LIST(DEFINE_REGISTERS)
+#undef DEFINE_REGISTERS
+constexpr Register wsp(kSPRegInternalCode, kWRegSize);
+constexpr Register sp(kSPRegInternalCode, kXRegSize);
+
+
+#define DEFINE_VREGISTERS(N) \
+constexpr VRegister b##N(N, kBRegSize); \
+constexpr VRegister h##N(N, kHRegSize); \
+constexpr VRegister s##N(N, kSRegSize); \
+constexpr VRegister d##N(N, kDRegSize); \
+constexpr VRegister q##N(N, kQRegSize); \
+constexpr VRegister v##N(N, kQRegSize);
+REGISTER_CODE_LIST(DEFINE_VREGISTERS)
+#undef DEFINE_VREGISTERS
+
+
+// Register aliases.
+constexpr Register ip0 = x16;
+constexpr Register ip1 = x17;
+constexpr Register lr = x30;
+constexpr Register xzr = x31;
+constexpr Register wzr = w31;
+
+
+// AreAliased returns true if any of the named registers overlap. Arguments
+// set to NoReg are ignored. The system stack pointer may be specified.
+bool AreAliased(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoReg,
+ const CPURegister& reg4 = NoReg,
+ const CPURegister& reg5 = NoReg,
+ const CPURegister& reg6 = NoReg,
+ const CPURegister& reg7 = NoReg,
+ const CPURegister& reg8 = NoReg);
+
+
+// AreSameSizeAndType returns true if all of the specified registers have the
+// same size, and are of the same type. The system stack pointer may be
+// specified. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoCPUReg).
+bool AreSameSizeAndType(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg,
+ const CPURegister& reg5 = NoCPUReg,
+ const CPURegister& reg6 = NoCPUReg,
+ const CPURegister& reg7 = NoCPUReg,
+ const CPURegister& reg8 = NoCPUReg);
+
+
+// AreSameFormat returns true if all of the specified VRegisters have the same
+// vector format. Arguments set to NoReg are ignored, as are any subsequent
+// arguments. At least one argument (reg1) must be valid (not NoVReg).
+bool AreSameFormat(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// AreConsecutive returns true if all of the specified VRegisters are
+// consecutive in the register file. Arguments set to NoReg are ignored, as are
+// any subsequent arguments. At least one argument (reg1) must be valid
+// (not NoVReg).
+bool AreConsecutive(const VRegister& reg1,
+ const VRegister& reg2,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+// Lists of registers.
+class CPURegList {
+ public:
+ explicit CPURegList(CPURegister reg1,
+ CPURegister reg2 = NoCPUReg,
+ CPURegister reg3 = NoCPUReg,
+ CPURegister reg4 = NoCPUReg)
+ : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
+ size_(reg1.size()), type_(reg1.type()) {
+ VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+ : list_(list), size_(size), type_(type) {
+ VIXL_ASSERT(IsValid());
+ }
+
+ CPURegList(CPURegister::RegisterType type, unsigned size,
+ unsigned first_reg, unsigned last_reg)
+ : size_(size), type_(type) {
+ VIXL_ASSERT(((type == CPURegister::kRegister) &&
+ (last_reg < kNumberOfRegisters)) ||
+ ((type == CPURegister::kVRegister) &&
+ (last_reg < kNumberOfVRegisters)));
+ VIXL_ASSERT(last_reg >= first_reg);
+ list_ = (UINT64_C(1) << (last_reg + 1)) - 1;
+ list_ &= ~((UINT64_C(1) << first_reg) - 1);
+ VIXL_ASSERT(IsValid());
+ }
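+
+ // For example, CPURegList(CPURegister::kRegister, kXRegSize, 19, 28)
+ // produces the list {x19, ..., x28}: bits 0-28 are set first, then bits
+ // 0-18 are cleared again.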
+
+ CPURegister::RegisterType type() const {
+ VIXL_ASSERT(IsValid());
+ return type_;
+ }
+
+ // Combine another CPURegList into this one. Registers that already exist in
+ // this list are left unchanged. The type and size of the registers in the
+ // 'other' list must match those in this list.
+ void Combine(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ |= other.list();
+ }
+
+ // Remove every register in the other CPURegList from this one. Registers that
+ // do not exist in this list are ignored. The type and size of the registers
+ // in the 'other' list must match those in this list.
+ void Remove(const CPURegList& other) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.RegisterSizeInBits() == size_);
+ list_ &= ~other.list();
+ }
+
+ // Variants of Combine and Remove which take a single register.
+ void Combine(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Combine(other.code());
+ }
+
+ void Remove(const CPURegister& other) {
+ VIXL_ASSERT(other.type() == type_);
+ VIXL_ASSERT(other.size() == size_);
+ Remove(other.code());
+ }
+
+ // Variants of Combine and Remove which take a single register by its code;
+ // the type and size of the register is inferred from this list.
+ void Combine(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ |= (UINT64_C(1) << code);
+ }
+
+ void Remove(int code) {
+ VIXL_ASSERT(IsValid());
+ VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
+ list_ &= ~(UINT64_C(1) << code);
+ }
+
+ static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+ }
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Union(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2) {
+ VIXL_ASSERT(list_1.type_ == list_2.type_);
+ VIXL_ASSERT(list_1.size_ == list_2.size_);
+ return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+ }
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3);
+ static CPURegList Intersection(const CPURegList& list_1,
+ const CPURegList& list_2,
+ const CPURegList& list_3,
+ const CPURegList& list_4);
+
+ bool Overlaps(const CPURegList& other) const {
+ return (type_ == other.type_) && ((list_ & other.list_) != 0);
+ }
+
+ RegList list() const {
+ VIXL_ASSERT(IsValid());
+ return list_;
+ }
+
+ void set_list(RegList new_list) {
+ VIXL_ASSERT(IsValid());
+ list_ = new_list;
+ }
+
+ // Remove all callee-saved registers from the list. This can be useful when
+ // preparing registers for an AAPCS64 function call, for example.
+ void RemoveCalleeSaved();
+
+ CPURegister PopLowestIndex();
+ CPURegister PopHighestIndex();
+
+ // AAPCS64 callee-saved registers.
+ static CPURegList GetCalleeSaved(unsigned size = kXRegSize);
+ static CPURegList GetCalleeSavedV(unsigned size = kDRegSize);
+
+ // AAPCS64 caller-saved registers. Note that this includes lr.
+ // TODO(all): Determine how we handle d8-d15 being callee-saved, but the top
+ // 64-bits being caller-saved.
+ static CPURegList GetCallerSaved(unsigned size = kXRegSize);
+ static CPURegList GetCallerSavedV(unsigned size = kDRegSize);
+
+ bool IsEmpty() const {
+ VIXL_ASSERT(IsValid());
+ return list_ == 0;
+ }
+
+ bool IncludesAliasOf(const CPURegister& other) const {
+ VIXL_ASSERT(IsValid());
+ return (type_ == other.type()) && ((other.Bit() & list_) != 0);
+ }
+
+ bool IncludesAliasOf(int code) const {
+ VIXL_ASSERT(IsValid());
+ return ((code & list_) != 0);
+ }
+
+ int Count() const {
+ VIXL_ASSERT(IsValid());
+ return CountSetBits(list_);
+ }
+
+ unsigned RegisterSizeInBits() const {
+ VIXL_ASSERT(IsValid());
+ return size_;
+ }
+
+ unsigned RegisterSizeInBytes() const {
+ int size_in_bits = RegisterSizeInBits();
+ VIXL_ASSERT((size_in_bits % 8) == 0);
+ return size_in_bits / 8;
+ }
+
+ unsigned TotalSizeInBytes() const {
+ VIXL_ASSERT(IsValid());
+ return RegisterSizeInBytes() * Count();
+ }
+
+ private:
+ RegList list_;
+ unsigned size_;
+ CPURegister::RegisterType type_;
+
+ bool IsValid() const;
+};
+
+
+// AAPCS64 callee-saved registers.
+extern const CPURegList kCalleeSaved;
+extern const CPURegList kCalleeSavedV;
+
+
+// AAPCS64 caller-saved registers. Note that this includes lr.
+extern const CPURegList kCallerSaved;
+extern const CPURegList kCallerSavedV;
+
+
+// Operand.
+class Operand {
+ public:
+ // #<immediate>
+ // where <immediate> is int64_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(int64_t immediate = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<shift> #<shift_amount>}
+ // where <shift> is one of {LSL, LSR, ASR, ROR}.
+ // <shift_amount> is uint6_t.
+ // This is allowed to be an implicit constructor because Operand is
+ // a wrapper class that doesn't normally perform any type conversion.
+ Operand(Register reg,
+ Shift shift = LSL,
+ unsigned shift_amount = 0); // NOLINT(runtime/explicit)
+
+ // rm, {<extend> {#<shift_amount>}}
+ // where <extend> is one of {UXTB, UXTH, UXTW, UXTX, SXTB, SXTH, SXTW, SXTX}.
+ // <shift_amount> is uint2_t.
+ explicit Operand(Register reg, Extend extend, unsigned shift_amount = 0);
+
+ bool IsImmediate() const;
+ bool IsShiftedRegister() const;
+ bool IsExtendedRegister() const;
+ bool IsZero() const;
+
+ // This returns an LSL shift (<= 4) operand as an equivalent extend operand,
+ // which helps in the encoding of instructions that use the stack pointer.
+ Operand ToExtendedRegister() const;
+
+ int64_t immediate() const {
+ VIXL_ASSERT(IsImmediate());
+ return immediate_;
+ }
+
+ Register reg() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return reg_;
+ }
+
+ CPURegister maybeReg() const {
+ if (IsShiftedRegister() || IsExtendedRegister())
+ return reg_;
+ return NoCPUReg;
+ }
+
+ Shift shift() const {
+ VIXL_ASSERT(IsShiftedRegister());
+ return shift_;
+ }
+
+ Extend extend() const {
+ VIXL_ASSERT(IsExtendedRegister());
+ return extend_;
+ }
+
+ unsigned shift_amount() const {
+ VIXL_ASSERT(IsShiftedRegister() || IsExtendedRegister());
+ return shift_amount_;
+ }
+
+ private:
+ int64_t immediate_;
+ Register reg_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// MemOperand represents the addressing mode of a load or store instruction.
+class MemOperand {
+ public:
+ explicit MemOperand(Register base,
+ int64_t offset = 0,
+ AddrMode addrmode = Offset);
+ MemOperand(Register base,
+ Register regoffset,
+ Shift shift = LSL,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ Register regoffset,
+ Extend extend,
+ unsigned shift_amount = 0);
+ MemOperand(Register base,
+ const Operand& offset,
+ AddrMode addrmode = Offset);
+
+ // Adapter constructors using C++11 delegating constructors.
+ // TODO: If sp == kSPRegInternalCode, the xzr check isn't necessary.
+ explicit MemOperand(js::jit::Address addr)
+ : MemOperand(IsHiddenSP(addr.base) ? sp : Register(AsRegister(addr.base), 64),
+ (ptrdiff_t)addr.offset) {
+ }
+
+ const Register& base() const { return base_; }
+ const Register& regoffset() const { return regoffset_; }
+ int64_t offset() const { return offset_; }
+ AddrMode addrmode() const { return addrmode_; }
+ Shift shift() const { return shift_; }
+ Extend extend() const { return extend_; }
+ unsigned shift_amount() const { return shift_amount_; }
+ bool IsImmediateOffset() const;
+ bool IsRegisterOffset() const;
+ bool IsPreIndex() const;
+ bool IsPostIndex() const;
+
+ void AddOffset(int64_t offset);
+
+ private:
+ Register base_;
+ Register regoffset_;
+ int64_t offset_;
+ AddrMode addrmode_;
+ Shift shift_;
+ Extend extend_;
+ unsigned shift_amount_;
+};
+
+
+// Control whether or not position-independent code should be emitted.
+enum PositionIndependentCodeOption {
+ // All code generated will be position-independent; all branches and
+ // references to labels generated with the Label class will use PC-relative
+ // addressing.
+ PositionIndependentCode,
+
+ // Allow VIXL to generate code that refers to absolute addresses. With this
+ // option, it will not be possible to copy the code buffer and run it from a
+ // different address; code must be generated in its final location.
+ PositionDependentCode,
+
+ // Allow VIXL to assume that the bottom 12 bits of the address will be
+ // constant, but that the top 48 bits may change. This allows `adrp` to
+ // function in systems which copy code between pages, but otherwise maintain
+ // 4KB page alignment.
+ PageOffsetDependentCode
+};
+
+
+// Control how scaled- and unscaled-offset loads and stores are generated.
+enum LoadStoreScalingOption {
+ // Prefer scaled-immediate-offset instructions, but emit unscaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferScaledOffset,
+
+ // Prefer unscaled-immediate-offset instructions, but emit scaled-offset,
+ // register-offset, pre-index or post-index instructions if necessary.
+ PreferUnscaledOffset,
+
+ // Require scaled-immediate-offset instructions.
+ RequireScaledOffset,
+
+ // Require unscaled-immediate-offset instructions.
+ RequireUnscaledOffset
+};
+
+
+// Assembler.
+class Assembler : public MozBaseAssembler {
+ public:
+ Assembler(PositionIndependentCodeOption pic = PositionIndependentCode);
+
+ // System functions.
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+#define COPYENUM(v) static const Condition v = vixl::v
+#define COPYENUM_(v) static const Condition v = vixl::v##_
+ COPYENUM(Equal);
+ COPYENUM(Zero);
+ COPYENUM(NotEqual);
+ COPYENUM(NonZero);
+ COPYENUM(AboveOrEqual);
+ COPYENUM(CarrySet);
+ COPYENUM(Below);
+ COPYENUM(CarryClear);
+ COPYENUM(Signed);
+ COPYENUM(NotSigned);
+ COPYENUM(Overflow);
+ COPYENUM(NoOverflow);
+ COPYENUM(Above);
+ COPYENUM(BelowOrEqual);
+ COPYENUM_(GreaterThanOrEqual);
+ COPYENUM_(LessThan);
+ COPYENUM_(GreaterThan);
+ COPYENUM_(LessThanOrEqual);
+ COPYENUM(Always);
+ COPYENUM(Never);
+#undef COPYENUM
+#undef COPYENUM_
+
+ // Bit set when a DoubleCondition does not map to a single ARM condition.
+ // The MacroAssembler must special-case these conditions, or else
+ // ConditionFromDoubleCondition will complain.
+ static const int DoubleConditionBitSpecial = 0x100;
+
+ enum DoubleCondition {
+ DoubleOrdered = Condition::vc,
+ DoubleEqual = Condition::eq,
+ DoubleNotEqual = Condition::ne | DoubleConditionBitSpecial,
+ DoubleGreaterThan = Condition::gt,
+ DoubleGreaterThanOrEqual = Condition::ge,
+ DoubleLessThan = Condition::lo, // Could also use Condition::mi.
+ DoubleLessThanOrEqual = Condition::ls,
+
+ // If either operand is NaN, these conditions always evaluate to true.
+ DoubleUnordered = Condition::vs,
+ DoubleEqualOrUnordered = Condition::eq | DoubleConditionBitSpecial,
+ DoubleNotEqualOrUnordered = Condition::ne,
+ DoubleGreaterThanOrUnordered = Condition::hi,
+ DoubleGreaterThanOrEqualOrUnordered = Condition::hs,
+ DoubleLessThanOrUnordered = Condition::lt,
+ DoubleLessThanOrEqualOrUnordered = Condition::le
+ };
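+
+ // These mappings rely on how fcmp sets the flags: an unordered result
+ // (either operand is NaN) sets C and V and clears N and Z. For example, lo
+ // (C clear) only holds for an ordered "less than", while lt (N != V) also
+ // holds for the unordered case.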
+
+ static inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+ }
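+
+  // For example, InvertCondition(eq) == ne and InvertCondition(mi) == pl,
+  // since paired condition encodings differ only in bit 0.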
+
+  // This changes the condition codes for cmp a, b into the equivalent codes
+  // for cmp b, a, i.e. it swaps the comparison operands rather than negating
+  // the condition.
+  static inline Condition InvertCmpCondition(Condition cond) {
+    // Conditions not listed below (including al, nv, vs and vc) fall through
+    // to the MOZ_CRASH in the default case.
+ switch (cond) {
+ case eq:
+ case ne:
+ return cond;
+ case gt:
+ return le;
+ case le:
+ return gt;
+ case ge:
+ return lt;
+ case lt:
+ return ge;
+ case hi:
+ return lo;
+ case lo:
+ return hi;
+ case hs:
+ return ls;
+ case ls:
+ return hs;
+ case mi:
+ return pl;
+ case pl:
+ return mi;
+ default:
+ MOZ_CRASH("TODO: figure this case out.");
+ }
+ return static_cast<Condition>(cond ^ 1);
+ }
+
+ static inline DoubleCondition InvertCondition(DoubleCondition cond) {
+ switch (cond) {
+ case DoubleOrdered:
+ return DoubleUnordered;
+ case DoubleEqual:
+ return DoubleNotEqualOrUnordered;
+ case DoubleNotEqual:
+ return DoubleEqualOrUnordered;
+ case DoubleGreaterThan:
+ return DoubleLessThanOrEqualOrUnordered;
+ case DoubleGreaterThanOrEqual:
+ return DoubleLessThanOrUnordered;
+ case DoubleLessThan:
+ return DoubleGreaterThanOrEqualOrUnordered;
+ case DoubleLessThanOrEqual:
+ return DoubleGreaterThanOrUnordered;
+ case DoubleUnordered:
+ return DoubleOrdered;
+ case DoubleEqualOrUnordered:
+ return DoubleNotEqual;
+ case DoubleNotEqualOrUnordered:
+ return DoubleEqual;
+ case DoubleGreaterThanOrUnordered:
+ return DoubleLessThanOrEqual;
+ case DoubleGreaterThanOrEqualOrUnordered:
+ return DoubleLessThan;
+ case DoubleLessThanOrUnordered:
+ return DoubleGreaterThanOrEqual;
+ case DoubleLessThanOrEqualOrUnordered:
+ return DoubleGreaterThan;
+ default:
+ MOZ_CRASH("Bad condition");
+ }
+ }
+
+ static inline Condition ConditionFromDoubleCondition(DoubleCondition cond) {
+ VIXL_ASSERT(!(cond & DoubleConditionBitSpecial));
+ return static_cast<Condition>(cond);
+ }
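+
+  // Illustrative sketch (caller-side handling, not part of this interface):
+  // conditions carrying DoubleConditionBitSpecial cannot be passed to
+  // ConditionFromDoubleCondition directly; the unordered case needs its own
+  // branch first, e.g.
+  //
+  //   if (cond & DoubleConditionBitSpecial) {
+  //     // Emit a separate check for the unordered case (e.g. b(label, vs))
+  //     // before using the underlying condition.
+  //   }
+  //   Condition c = ConditionFromDoubleCondition(
+  //       static_cast<DoubleCondition>(cond & ~DoubleConditionBitSpecial));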
+
+ // Instruction set functions.
+
+ // Branch / Jump instructions.
+ // Branch to register.
+ void br(const Register& xn);
+ static void br(Instruction* at, const Register& xn);
+
+ // Branch with link to register.
+ void blr(const Register& xn);
+  static void blr(Instruction* at, const Register& xn);
+
+ // Branch to register with return hint.
+ void ret(const Register& xn = lr);
+
+ // Unconditional branch to label.
+ BufferOffset b(Label* label);
+
+ // Conditional branch to label.
+ BufferOffset b(Label* label, Condition cond);
+
+ // Unconditional branch to PC offset.
+ BufferOffset b(int imm26, const LabelDoc& doc);
+ static void b(Instruction* at, int imm26);
+
+ // Conditional branch to PC offset.
+ BufferOffset b(int imm19, Condition cond, const LabelDoc& doc);
+  static void b(Instruction* at, int imm19, Condition cond);
+
+ // Branch with link to label.
+ void bl(Label* label);
+
+ // Branch with link to PC offset.
+ void bl(int imm26, const LabelDoc& doc);
+ static void bl(Instruction* at, int imm26);
+
+ // Compare and branch to label if zero.
+ void cbz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if zero.
+ void cbz(const Register& rt, int imm19, const LabelDoc& doc);
+ static void cbz(Instruction* at, const Register& rt, int imm19);
+
+ // Compare and branch to label if not zero.
+ void cbnz(const Register& rt, Label* label);
+
+ // Compare and branch to PC offset if not zero.
+ void cbnz(const Register& rt, int imm19, const LabelDoc& doc);
+ static void cbnz(Instruction* at, const Register& rt, int imm19);
+
+ // Table lookup from one register.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup from two registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup from three registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup from four registers.
+ void tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Table lookup extension from one register.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Table lookup extension from two registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm);
+
+ // Table lookup extension from three registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm);
+
+ // Table lookup extension from four registers.
+ void tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm);
+
+ // Test bit and branch to label if zero.
+ void tbz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if zero.
+ void tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
+ static void tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Test bit and branch to label if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, Label* label);
+
+ // Test bit and branch to PC offset if not zero.
+ void tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc);
+ static void tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14);
+
+ // Address calculation instructions.
+  // Calculate a PC-relative address. Unlike for branches, the offset in adr is
+  // unscaled (i.e. the result can be unaligned).
+
+ // Calculate the address of a label.
+ void adr(const Register& rd, Label* label);
+
+ // Calculate the address of a PC offset.
+ void adr(const Register& rd, int imm21, const LabelDoc& doc);
+ static void adr(Instruction* at, const Register& rd, int imm21);
+
+ // Calculate the page address of a label.
+ void adrp(const Register& rd, Label* label);
+
+ // Calculate the page address of a PC offset.
+ void adrp(const Register& rd, int imm21, const LabelDoc& doc);
+ static void adrp(Instruction* at, const Register& rd, int imm21);
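+
+  // Illustrative sketch (the label name is hypothetical):
+  //
+  //   Label data;
+  //   adr(x0, &data);   // x0 = byte address of `data` (PC-relative, unscaled).
+  //   adrp(x1, &data);  // x1 = 4KB page containing `data`; add the low 12 bits
+  //                     // of the target address to form the full address.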
+
+ // Data Processing instructions.
+ // Add.
+ void add(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add and update status flags.
+ void adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare negative.
+ void cmn(const Register& rn, const Operand& operand);
+
+ // Subtract.
+ void sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract and update status flags.
+ void subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Compare.
+ void cmp(const Register& rn, const Operand& operand);
+
+ // Negate.
+ void neg(const Register& rd,
+ const Operand& operand);
+
+ // Negate and update status flags.
+ void negs(const Register& rd,
+ const Operand& operand);
+
+ // Add with carry bit.
+ void adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Add with carry bit and update status flags.
+ void adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit.
+ void sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Subtract with carry bit and update status flags.
+ void sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Negate with carry bit.
+ void ngc(const Register& rd,
+ const Operand& operand);
+
+ // Negate with carry bit and update status flags.
+ void ngcs(const Register& rd,
+ const Operand& operand);
+
+ // Logical instructions.
+ // Bitwise and (A & B).
+ void and_(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise and (A & B) and update status flags.
+ BufferOffset ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit test and set flags.
+ BufferOffset tst(const Register& rn, const Operand& operand);
+
+ // Bit clear (A & ~B).
+ void bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bit clear (A & ~B) and update status flags.
+ void bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+
+ // Bitwise or (A | B).
+ void orr(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise or not (A | ~B).
+ void orn(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Bitwise eor/xor (A ^ B).
+ void eor(const Register& rd, const Register& rn, const Operand& operand);
+
+  // Bitwise exclusive or not / xnor (A ^ ~B).
+ void eon(const Register& rd, const Register& rn, const Operand& operand);
+
+ // Logical shift left by variable.
+ void lslv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Logical shift right by variable.
+ void lsrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Arithmetic shift right by variable.
+ void asrv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Rotate right by variable.
+ void rorv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bitfield instructions.
+ // Bitfield move.
+ void bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Signed bitfield move.
+ void sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Unsigned bitfield move.
+ void ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms);
+
+ // Bfm aliases.
+ // Bitfield insert.
+ void bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
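+
+  // For example, bfi(w0, w1, 8, 4) copies w1<3:0> into w0<11:8> and leaves the
+  // remaining bits of w0 unchanged.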
+
+ // Bitfield extract and insert low.
+ void bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ bfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Sbfm aliases.
+ // Arithmetic shift right.
+ void asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ sbfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Signed bitfield insert with zero at right.
+ void sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Signed bitfield extract.
+ void sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ sbfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Signed extend byte.
+ void sxtb(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 7);
+ }
+
+ // Signed extend halfword.
+ void sxth(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 15);
+ }
+
+ // Signed extend word.
+ void sxtw(const Register& rd, const Register& rn) {
+ sbfm(rd, rn, 0, 31);
+ }
+
+ // Ubfm aliases.
+ // Logical shift left.
+ void lsl(const Register& rd, const Register& rn, unsigned shift) {
+ unsigned reg_size = rd.size();
+ VIXL_ASSERT(shift < reg_size);
+ ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
+ }
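+
+  // For example, with 64-bit registers lsl(x0, x1, 4) is encoded as
+  // ubfm(x0, x1, 60, 59), i.e. immr = (64 - 4) % 64 and imms = 64 - 4 - 1.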
+
+ // Logical shift right.
+ void lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(shift < rd.size());
+ ubfm(rd, rn, shift, rd.size() - 1);
+ }
+
+ // Unsigned bitfield insert with zero at right.
+ void ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
+ }
+
+ // Unsigned bitfield extract.
+ void ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(width >= 1);
+ VIXL_ASSERT(lsb + width <= rn.size());
+ ubfm(rd, rn, lsb, lsb + width - 1);
+ }
+
+ // Unsigned extend byte.
+ void uxtb(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 7);
+ }
+
+ // Unsigned extend halfword.
+ void uxth(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 15);
+ }
+
+ // Unsigned extend word.
+ void uxtw(const Register& rd, const Register& rn) {
+ ubfm(rd, rn, 0, 31);
+ }
+
+ // Extract.
+ void extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb);
+
+ // Conditional select: rd = cond ? rn : rm.
+ void csel(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select increment: rd = cond ? rn : rm + 1.
+ void csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select inversion: rd = cond ? rn : ~rm.
+ void csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional select negation: rd = cond ? rn : -rm.
+ void csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond);
+
+ // Conditional set: rd = cond ? 1 : 0.
+ void cset(const Register& rd, Condition cond);
+
+ // Conditional set mask: rd = cond ? -1 : 0.
+ void csetm(const Register& rd, Condition cond);
+
+ // Conditional increment: rd = cond ? rn + 1 : rn.
+ void cinc(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional invert: rd = cond ? ~rn : rn.
+ void cinv(const Register& rd, const Register& rn, Condition cond);
+
+ // Conditional negate: rd = cond ? -rn : rn.
+ void cneg(const Register& rd, const Register& rn, Condition cond);
+
+ // Rotate right.
+ void ror(const Register& rd, const Register& rs, unsigned shift) {
+ extr(rd, rs, rs, shift);
+ }
+
+ // Conditional comparison.
+ // Conditional compare negative.
+ void ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // Conditional compare.
+ void ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // CRC-32 checksum from byte.
+ void crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from half-word.
+ void crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from word.
+ void crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 checksum from double word.
+ void crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from byte.
+ void crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from half-word.
+ void crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32 C checksum from word.
+ void crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // CRC-32C checksum from double word.
+ void crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm);
+
+ // Multiply.
+ void mul(const Register& rd, const Register& rn, const Register& rm);
+
+ // Negated multiply.
+ void mneg(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed long multiply: 32 x 32 -> 64-bit.
+ void smull(const Register& rd, const Register& rn, const Register& rm);
+
+ // Signed multiply high: 64 x 64 -> 64-bit <127:64>.
+ void smulh(const Register& xd, const Register& xn, const Register& xm);
+
+ // Multiply and accumulate.
+ void madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Multiply and subtract.
+ void msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and accumulate: 32 x 32 + 64 -> 64-bit.
+ void umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply: 32 x 32 -> 64-bit.
+ void umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ umaddl(rd, rn, rm, xzr);
+ }
+
+ // Unsigned multiply high: 64 x 64 -> 64-bit <127:64>.
+ void umulh(const Register& xd,
+ const Register& xn,
+ const Register& xm);
+
+ // Signed long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Unsigned long multiply and subtract: 64 - (32 x 32) -> 64-bit.
+ void umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra);
+
+ // Signed integer divide.
+ void sdiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Unsigned integer divide.
+ void udiv(const Register& rd, const Register& rn, const Register& rm);
+
+ // Bit reverse.
+ void rbit(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 16-bit half words.
+ void rev16(const Register& rd, const Register& rn);
+
+ // Reverse bytes in 32-bit words.
+ void rev32(const Register& rd, const Register& rn);
+
+ // Reverse bytes.
+ void rev(const Register& rd, const Register& rn);
+
+ // Count leading zeroes.
+ void clz(const Register& rd, const Register& rn);
+
+ // Count leading sign bits.
+ void cls(const Register& rd, const Register& rn);
+
+ // Memory instructions.
+ // Load integer or FP register.
+ void ldr(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store integer or FP register.
+ void str(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load word with sign extension.
+ void ldrsw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte.
+ void ldrb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store byte.
+ void strb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load byte with sign extension.
+ void ldrsb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word.
+ void ldrh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Store half-word.
+ void strh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load half-word with sign extension.
+ void ldrsh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Load integer or FP register (with unscaled offset).
+ void ldur(const CPURegister& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store integer or FP register (with unscaled offset).
+  void stur(const CPURegister& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load word with sign extension.
+ void ldursw(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte (with unscaled offset).
+ void ldurb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store byte (with unscaled offset).
+ void sturb(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load byte with sign extension (and unscaled offset).
+ void ldursb(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word (with unscaled offset).
+ void ldurh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Store half-word (with unscaled offset).
+ void sturh(const Register& rt, const MemOperand& dst,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load half-word with sign extension (and unscaled offset).
+ void ldursh(const Register& rt, const MemOperand& src,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Load integer or FP register pair.
+ void ldp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair.
+ void stp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load word pair with sign extension.
+ void ldpsw(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Load integer or FP register pair, non-temporal.
+ void ldnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& src);
+
+ // Store integer or FP register pair, non-temporal.
+ void stnp(const CPURegister& rt, const CPURegister& rt2,
+ const MemOperand& dst);
+
+ // Load integer or FP register from pc + imm19 << 2.
+ void ldr(const CPURegister& rt, int imm19);
+ static void ldr(Instruction* at, const CPURegister& rt, int imm19);
+
+ // Load word with sign extension from pc + imm19 << 2.
+ void ldrsw(const Register& rt, int imm19);
+
+ // Store exclusive byte.
+ void stxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive half-word.
+ void stxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store exclusive register.
+ void stxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load exclusive byte.
+ void ldxrb(const Register& rt, const MemOperand& src);
+
+ // Load exclusive half-word.
+ void ldxrh(const Register& rt, const MemOperand& src);
+
+ // Load exclusive register.
+ void ldxr(const Register& rt, const MemOperand& src);
+
+ // Store exclusive register pair.
+ void stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load exclusive register pair.
+ void ldxp(const Register& rt, const Register& rt2, const MemOperand& src);
+
+ // Store-release exclusive byte.
+ void stlxrb(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive half-word.
+ void stlxrh(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Store-release exclusive register.
+ void stlxr(const Register& rs, const Register& rt, const MemOperand& dst);
+
+ // Load-acquire exclusive byte.
+ void ldaxrb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive half-word.
+ void ldaxrh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire exclusive register.
+ void ldaxr(const Register& rt, const MemOperand& src);
+
+ // Store-release exclusive register pair.
+ void stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst);
+
+ // Load-acquire exclusive register pair.
+ void ldaxp(const Register& rt, const Register& rt2, const MemOperand& src);
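+
+  // Illustrative sketch (not from the original source): an atomic increment
+  // built from the acquire/release exclusive pair. The registers are arbitrary
+  // and bind() is assumed from the underlying label machinery.
+  //
+  //   Label retry;
+  //   bind(&retry);
+  //   ldaxr(w1, MemOperand(x0));       // load-acquire exclusive from [x0]
+  //   add(w1, w1, Operand(1));         // modify the loaded value
+  //   stlxr(w2, w1, MemOperand(x0));   // store-release exclusive; w2 = status
+  //   cbnz(w2, &retry);                // status 0 means success, else retry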
+
+ // Store-release byte.
+ void stlrb(const Register& rt, const MemOperand& dst);
+
+ // Store-release half-word.
+ void stlrh(const Register& rt, const MemOperand& dst);
+
+ // Store-release register.
+ void stlr(const Register& rt, const MemOperand& dst);
+
+ // Load-acquire byte.
+ void ldarb(const Register& rt, const MemOperand& src);
+
+ // Load-acquire half-word.
+ void ldarh(const Register& rt, const MemOperand& src);
+
+ // Load-acquire register.
+ void ldar(const Register& rt, const MemOperand& src);
+
+ // Prefetch memory.
+ void prfm(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ // Prefetch memory (with unscaled offset).
+ void prfum(PrefetchOperation op, const MemOperand& addr,
+ LoadStoreScalingOption option = PreferUnscaledOffset);
+
+ // Prefetch from pc + imm19 << 2.
+ void prfm(PrefetchOperation op, int imm19);
+
+ // Move instructions. The default shift of -1 indicates that the move
+ // instruction will calculate an appropriate 16-bit immediate and left shift
+ // that is equal to the 64-bit immediate argument. If an explicit left shift
+ // is specified (0, 16, 32 or 48), the immediate must be a 16-bit value.
+ //
+ // For movk, an explicit shift can be used to indicate which half word should
+  // be overwritten, e.g. movk(x0, 0, 0) will overwrite the least-significant
+ // half word with zero, whereas movk(x0, 0, 48) will overwrite the
+ // most-significant.
+
+ // Move immediate and keep.
+ void movk(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVK);
+ }
+
+ // Move inverted immediate.
+ void movn(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVN);
+ }
+
+ // Move immediate.
+ void movz(const Register& rd, uint64_t imm, int shift = -1) {
+ MoveWide(rd, imm, shift, MOVZ);
+ }
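+
+  // Illustrative sketch (register and constant are arbitrary): building the
+  // 64-bit constant 0x0000123400005678 one half word at a time.
+  //
+  //   movz(x0, 0x5678);       // x0 = 0x0000000000005678 (shift inferred as 0)
+  //   movk(x0, 0x1234, 32);   // x0 = 0x0000123400005678 (other bits kept)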
+
+ // Misc instructions.
+ // Monitor debug-mode breakpoint.
+ void brk(int code);
+
+ // Halting debug-mode breakpoint.
+ void hlt(int code);
+
+ // Generate exception targeting EL1.
+ void svc(int code);
+ static void svc(Instruction* at, int code);
+
+ // Move register to register.
+ void mov(const Register& rd, const Register& rn);
+
+ // Move inverted operand to register.
+ void mvn(const Register& rd, const Operand& operand);
+
+ // System instructions.
+ // Move to register from system register.
+ void mrs(const Register& rt, SystemRegister sysreg);
+
+ // Move from register to system register.
+ void msr(SystemRegister sysreg, const Register& rt);
+
+ // System instruction.
+ void sys(int op1, int crn, int crm, int op2, const Register& rt = xzr);
+
+ // System instruction with pre-encoded op (op1:crn:crm:op2).
+ void sys(int op, const Register& rt = xzr);
+
+ // System data cache operation.
+ void dc(DataCacheOp op, const Register& rt);
+
+ // System instruction cache operation.
+ void ic(InstructionCacheOp op, const Register& rt);
+
+ // System hint.
+ BufferOffset hint(SystemHint code);
+ static void hint(Instruction* at, SystemHint code);
+
+ // Clear exclusive monitor.
+ void clrex(int imm4 = 0xf);
+
+ // Data memory barrier.
+ void dmb(BarrierDomain domain, BarrierType type);
+
+ // Data synchronization barrier.
+ void dsb(BarrierDomain domain, BarrierType type);
+
+ // Instruction synchronization barrier.
+ void isb();
+
+ // Alias for system instructions.
+ // No-op.
+ BufferOffset nop() {
+ return hint(NOP);
+ }
+ static void nop(Instruction* at);
+
+ // Alias for system instructions.
+ // Conditional speculation barrier.
+ BufferOffset csdb() {
+ return hint(CSDB);
+ }
+ static void csdb(Instruction* at);
+
+ // FP and NEON instructions.
+ // Move double precision immediate to FP register.
+ void fmov(const VRegister& vd, double imm);
+
+ // Move single precision immediate to FP register.
+ void fmov(const VRegister& vd, float imm);
+
+ // Move FP register to register.
+ void fmov(const Register& rd, const VRegister& fn);
+
+ // Move register to FP register.
+ void fmov(const VRegister& vd, const Register& rn);
+
+ // Move FP register to FP register.
+ void fmov(const VRegister& vd, const VRegister& fn);
+
+ // Move 64-bit register to top half of 128-bit FP register.
+ void fmov(const VRegister& vd, int index, const Register& rn);
+
+ // Move top half of 128-bit FP register to 64-bit register.
+ void fmov(const Register& rd, const VRegister& vn, int index);
+
+ // FP add.
+ void fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP subtract.
+ void fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP multiply.
+ void fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm);
+
+ // FP fused multiply-add.
+ void fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract.
+ void fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-add and negate.
+ void fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP fused multiply-subtract and negate.
+ void fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va);
+
+ // FP multiply-negate scalar.
+ void fnmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal exponent scalar.
+ void frecpx(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP divide.
+ void fdiv(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum.
+ void fmax(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum.
+ void fmin(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP maximum number.
+ void fmaxnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP minimum number.
+ void fminnm(const VRegister& vd, const VRegister& fn, const VRegister& vm);
+
+ // FP absolute.
+ void fabs(const VRegister& vd, const VRegister& vn);
+
+ // FP negate.
+ void fneg(const VRegister& vd, const VRegister& vn);
+
+ // FP square root.
+ void fsqrt(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to away.
+ void frinta(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, implicit rounding.
+ void frinti(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward minus infinity.
+ void frintm(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, nearest with ties to even.
+ void frintn(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, toward plus infinity.
+ void frintp(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, exact, implicit rounding.
+ void frintx(const VRegister& vd, const VRegister& vn);
+
+ // FP round to integer, towards zero.
+ void frintz(const VRegister& vd, const VRegister& vn);
+
+ void FPCompareMacro(const VRegister& vn,
+ double value,
+ FPTrapFlags trap);
+
+ void FPCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ FPTrapFlags trap);
+
+ // FP compare registers.
+ void fcmp(const VRegister& vn, const VRegister& vm);
+
+ // FP compare immediate.
+ void fcmp(const VRegister& vn, double value);
+
+ void FPCCompareMacro(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap);
+
+ // FP conditional compare.
+ void fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP signaling compare registers.
+ void fcmpe(const VRegister& vn, const VRegister& vm);
+
+ // FP signaling compare immediate.
+ void fcmpe(const VRegister& vn, double value);
+
+ // FP conditional signaling compare.
+ void fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond);
+
+ // FP conditional select.
+ void fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond);
+
+ // Common FP Convert functions.
+ void NEONFPConvertToInt(const Register& rd,
+ const VRegister& vn,
+ Instr op);
+ void NEONFPConvertToInt(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+
+ // FP convert between precisions.
+ void fcvt(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision.
+ void fcvtl(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to higher precision (second part).
+ void fcvtl2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision.
+ void fcvtn(const VRegister& vd, const VRegister& vn);
+
+  // FP convert to lower precision (second part).
+ void fcvtn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd.
+ void fcvtxn(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to lower precision, rounding to odd (second part).
+ void fcvtxn2(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to away.
+ void fcvtas(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to away.
+ void fcvtau(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards -infinity.
+ void fcvtms(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards -infinity.
+ void fcvtmu(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, nearest with ties to even.
+ void fcvtns(const VRegister& rd, const VRegister& vn);
+
+ // FP JavaScript convert to signed integer, rounding toward zero [Armv8.3].
+ void fjcvtzs(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, nearest with ties to even.
+ void fcvtnu(const VRegister& rd, const VRegister& vn);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer or fixed-point, round towards zero.
+ void fcvtzs(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to unsigned integer or fixed-point, round towards zero.
+ void fcvtzu(const VRegister& vd, const VRegister& vn, int fbits = 0);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const Register& rd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const Register& rd, const VRegister& vn);
+
+ // FP convert to signed integer, round towards +infinity.
+ void fcvtps(const VRegister& vd, const VRegister& vn);
+
+ // FP convert to unsigned integer, round towards +infinity.
+ void fcvtpu(const VRegister& vd, const VRegister& vn);
+
+ // Convert signed integer or fixed point to FP.
+ void scvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert unsigned integer or fixed point to FP.
+ void ucvtf(const VRegister& fd, const Register& rn, int fbits = 0);
+
+ // Convert signed integer or fixed-point to FP.
+ void scvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Convert unsigned integer or fixed-point to FP.
+ void ucvtf(const VRegister& fd, const VRegister& vn, int fbits = 0);
+
+ // Unsigned absolute difference.
+ void uabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference.
+ void sabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate.
+ void uaba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate.
+ void saba(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add.
+ void add(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract.
+ void sub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving add.
+ void uhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving add.
+ void shadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding halving add.
+ void urhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding halving add.
+ void srhadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned halving sub.
+ void uhsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed halving sub.
+ void shsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating add.
+ void uqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating add.
+ void sqadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating subtract.
+ void uqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating subtract.
+ void sqsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pairwise.
+ void addp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add pair of elements scalar.
+ void addp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Multiply-add to accumulator.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply-subtract to accumulator.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Multiply by scalar element.
+ void mul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-add by scalar element.
+ void mla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Multiply-subtract by scalar element.
+ void mls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-add by scalar element (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-add by scalar element (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply-sub by scalar element (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply-sub by scalar element (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed long multiply by scalar element (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Unsigned long multiply by scalar element (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating double long multiply by element.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating double long multiply by element (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-add by element (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating doubling long multiply-sub by element (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Compare equal.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than or equal.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare signed greater than.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher.
+ void cmhi(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare unsigned higher or same.
+ void cmhs(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare bitwise test bits nonzero.
+ void cmtst(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Compare bitwise to zero.
+ void cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than or equal to zero.
+ void cmge(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed greater than zero.
+ void cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than or equal to zero.
+ void cmle(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Compare signed less than zero.
+ void cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int value);
+
+ // Signed shift left by register.
+ void sshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned shift left by register.
+ void ushl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating shift left by register.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating shift left by register.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed rounding shift left by register.
+ void srshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned rounding shift left by register.
+ void urshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding shift left by register.
+ void sqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned saturating rounding shift left by register.
+ void uqrshl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise and.
+ void and_(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or.
+ void orr(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise or immediate.
+ void orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Move register to register.
+ void mov(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise orn.
+ void orn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise eor.
+ void eor(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bit clear immediate.
+ void bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0);
+
+ // Bit clear.
+ void bic(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if false.
+ void bif(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise insert if true.
+ void bit(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Bitwise select.
+ void bsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply.
+ void pmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Vector move immediate.
+ void movi(const VRegister& vd,
+ const uint64_t imm,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Bitwise not.
+ void mvn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Vector move inverted immediate.
+ void mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0);
+
+ // Signed saturating accumulate of unsigned value.
+ void suqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating accumulate of signed value.
+ void usqadd(const VRegister& vd,
+ const VRegister& vn);
+
+ // Absolute value.
+ void abs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating absolute value.
+ void sqabs(const VRegister& vd,
+ const VRegister& vn);
+
+ // Negate.
+ void neg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating negate.
+ void sqneg(const VRegister& vd,
+ const VRegister& vn);
+
+ // Bitwise not.
+ void not_(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow.
+ void xtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract narrow (second part).
+ void xtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow.
+ void sqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract narrow (second part).
+ void sqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow.
+ void uqxtn(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned saturating extract narrow (second part).
+ void uqxtn2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow.
+ void sqxtun(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed saturating extract unsigned narrow (second part).
+ void sqxtun2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Extract vector from pair of vectors.
+ void ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index);
+
+ // Duplicate vector element to vector or scalar.
+ void dup(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to scalar.
+ void mov(const VRegister& vd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Duplicate general-purpose register to vector.
+ void dup(const VRegister& vd,
+ const Register& rn);
+
+ // Insert vector element from another vector element.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to another vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index);
+
+ // Insert vector element from general-purpose register.
+ void ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Move general-purpose register to a vector element.
+ void mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn);
+
+ // Unsigned move vector element to general-purpose register.
+ void umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Move vector element to general-purpose register.
+ void mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // Signed move vector element to general-purpose register.
+ void smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index);
+
+ // One-element structure load to one register.
+ void ld1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure load to two registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure load to three registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure load to four registers.
+ void ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure load to one lane.
+ void ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // One-element single structure load to all lanes.
+ void ld1r(const VRegister& vt,
+ const MemOperand& src);
+
+ // Two-element structure load.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure load to one lane.
+ void ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element single structure load to all lanes.
+ void ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Three-element structure load.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure load to one lane.
+ void ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element single structure load to all lanes.
+ void ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Four-element structure load.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure load to one lane.
+ void ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element single structure load to all lanes.
+ void ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Count leading sign bits.
+ void cls(const VRegister& vd,
+ const VRegister& vn);
+
+ // Count leading zero bits (vector).
+ void clz(const VRegister& vd,
+ const VRegister& vn);
+
+ // Population count per byte.
+ void cnt(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse bit order.
+ void rbit(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 16-bit halfwords.
+ void rev16(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 32-bit words.
+ void rev32(const VRegister& vd,
+ const VRegister& vn);
+
+ // Reverse elements in 64-bit doublewords.
+ void rev64(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal square root estimate.
+ void ursqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned reciprocal estimate.
+ void urecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add.
+ void saddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add.
+ void uaddlp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed pairwise long add and accumulate.
+ void sadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned pairwise long add and accumulate.
+ void uadalp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate.
+ void shl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left by immediate.
+ void sqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift left unsigned by immediate.
+ void sqshlu(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift left by immediate.
+ void uqshl(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate.
+ void sshll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift left long by immediate (second part).
+ void sshll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed extend long.
+ void sxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed extend long (second part).
+ void sxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned shift left long by immediate.
+ void ushll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift left long by immediate (second part).
+ void ushll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size.
+ void shll(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift left long by element size (second part).
+ void shll2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned extend long.
+ void uxtl(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned extend long (second part).
+ void uxtl2(const VRegister& vd,
+ const VRegister& vn);
+
+ // Shift left by immediate and insert.
+ void sli(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right by immediate and insert.
+ void sri(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed maximum.
+ void smax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed pairwise maximum.
+ void smaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add across vector.
+ void addv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed add long across vector.
+ void saddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned add long across vector.
+ void uaddlv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum number across vector.
+ void fmaxnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP maximum across vector.
+ void fmaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum number across vector.
+ void fminnmv(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP minimum across vector.
+ void fminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed maximum across vector.
+ void smaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Signed minimum.
+ void smin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum pairwise.
+ void sminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed minimum across vector.
+ void sminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // One-element structure store from one register.
+ void st1(const VRegister& vt,
+ const MemOperand& src);
+
+ // One-element structure store from two registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // One-element structure store from three registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // One-element structure store from four registers.
+ void st1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // One-element single structure store from one lane.
+ void st1(const VRegister& vt,
+ int lane,
+ const MemOperand& src);
+
+ // Two-element structure store from two registers.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src);
+
+ // Two-element single structure store from two lanes.
+ void st2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src);
+
+ // Three-element structure store from three registers.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src);
+
+ // Three-element single structure store from three lanes.
+ void st3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src);
+
+ // Four-element structure store from four registers.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src);
+
+ // Four-element single structure store from four lanes.
+ void st4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src);
+
+ // Unsigned add long.
+ void uaddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add long (second part).
+ void uaddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide.
+ void uaddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned add wide (second part).
+ void uaddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long.
+ void saddl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add long (second part).
+ void saddl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide.
+ void saddw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed add wide (second part).
+ void saddw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long.
+ void usubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract long (second part).
+ void usubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide.
+ void usubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned subtract wide (second part).
+ void usubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long.
+ void ssubl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed subtract long (second part).
+ void ssubl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide.
+ void ssubw(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed integer subtract wide (second part).
+ void ssubw2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum.
+ void umax(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise maximum.
+ void umaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned maximum across vector.
+ void umaxv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Unsigned minimum.
+ void umin(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned pairwise minimum.
+ void uminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned minimum across vector.
+ void uminv(const VRegister& vd,
+ const VRegister& vn);
+
+ // Transpose vectors (primary).
+ void trn1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Transpose vectors (secondary).
+ void trn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (primary).
+ void uzp1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unzip vectors (secondary).
+ void uzp2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (primary).
+ void zip1(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Zip vectors (secondary).
+ void zip2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed shift right by immediate.
+ void sshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate.
+ void ushr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate.
+ void srshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate.
+ void urshr(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed shift right by immediate and accumulate.
+ void ssra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned shift right by immediate and accumulate.
+ void usra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed rounding shift right by immediate and accumulate.
+ void srsra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned rounding shift right by immediate and accumulate.
+ void ursra(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate.
+ void shrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Shift right narrow by immediate (second part).
+ void shrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate.
+ void rshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Rounding shift right narrow by immediate (second part).
+ void rshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate.
+ void uqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating shift right narrow by immediate (second part).
+ void uqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate.
+ void uqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Unsigned saturating rounding shift right narrow by immediate (second part).
+ void uqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate.
+ void sqshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right narrow by immediate (second part).
+ void sqshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate.
+ void sqrshrn(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating rounded shift right narrow by immediate (second part).
+ void sqrshrn2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate.
+ void sqshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // Signed saturating shift right unsigned narrow by immediate (second part).
+ void sqshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+  // Signed saturating rounded shift right unsigned narrow by immediate.
+ void sqrshrun(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+  // Signed saturating rounded shift right unsigned narrow by immediate (second part).
+ void sqrshrun2(const VRegister& vd,
+ const VRegister& vn,
+ int shift);
+
+ // FP reciprocal step.
+ void frecps(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP reciprocal estimate.
+ void frecpe(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root estimate.
+ void frsqrte(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP reciprocal square root step.
+ void frsqrts(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long.
+ void sabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference and accumulate long (second part).
+ void sabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long.
+ void uabal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference and accumulate long (second part).
+ void uabal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long.
+ void sabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed absolute difference long (second part).
+ void sabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long.
+ void uabdl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned absolute difference long (second part).
+ void uabdl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long.
+ void pmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Polynomial multiply long (second part).
+ void pmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add.
+ void smlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-add (second part).
+ void smlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add.
+ void umlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-add (second part).
+ void umlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub.
+ void smlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply-sub (second part).
+ void smlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub.
+ void umlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply-sub (second part).
+ void umlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply.
+ void smull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed long multiply (second part).
+ void smull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add.
+ void sqdmlal(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-add (second part).
+ void sqdmlal2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract.
+ void sqdmlsl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply-subtract (second part).
+ void sqdmlsl2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply.
+ void sqdmull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling long multiply (second part).
+ void sqdmull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating rounding doubling multiply returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Signed saturating doubling multiply element returning high half.
+ void sqdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // Signed saturating rounding doubling multiply element returning high half.
+ void sqrdmulh(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+  // Unsigned long multiply.
+ void umull(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Unsigned long multiply (second part).
+ void umull2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half.
+ void addhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Add narrow returning high half (second part).
+ void addhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half.
+ void raddhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding add narrow returning high half (second part).
+ void raddhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half.
+ void subhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Subtract narrow returning high half (second part).
+ void subhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half.
+ void rsubhn(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // Rounding subtract narrow returning high half (second part).
+ void rsubhn2(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply accumulate.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply subtract.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP vector multiply extended.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than or equal.
+ void facge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP absolute greater than.
+ void facgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP multiply by element.
+ void fmul(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-add to accumulator by element.
+ void fmla(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP fused multiply-sub from accumulator by element.
+ void fmls(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP multiply extended by element.
+ void fmulx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index);
+
+ // FP compare equal.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP greater than or equal.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP compare equal to zero.
+ void fcmeq(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than zero.
+ void fcmgt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP greater than or equal to zero.
+ void fcmge(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP less than or equal to zero.
+ void fcmle(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+  // FP less than zero.
+ void fcmlt(const VRegister& vd,
+ const VRegister& vn,
+ double imm);
+
+ // FP absolute difference.
+ void fabd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add vector.
+ void faddp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise add scalar.
+ void faddp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum vector.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum scalar.
+ void fmaxp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum vector.
+ void fminp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum scalar.
+ void fminp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise maximum number vector.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise maximum number scalar.
+ void fmaxnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // FP pairwise minimum number vector.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm);
+
+ // FP pairwise minimum number scalar.
+ void fminnmp(const VRegister& vd,
+ const VRegister& vn);
+
+ // Emit generic instructions.
+ // Emit raw instructions into the instruction stream.
+ void dci(Instr raw_inst) { Emit(raw_inst); }
+
+ // Emit 32 bits of data into the instruction stream.
+ void dc32(uint32_t data) {
+ EmitData(&data, sizeof(data));
+ }
+
+ // Emit 64 bits of data into the instruction stream.
+ void dc64(uint64_t data) {
+ EmitData(&data, sizeof(data));
+ }
+
+ // Code generation helpers.
+
+ // Register encoding.
+ static Instr Rd(CPURegister rd) {
+ VIXL_ASSERT(rd.code() != kSPRegInternalCode);
+ return rd.code() << Rd_offset;
+ }
+
+ static Instr Rn(CPURegister rn) {
+ VIXL_ASSERT(rn.code() != kSPRegInternalCode);
+ return rn.code() << Rn_offset;
+ }
+
+ static Instr Rm(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ return rm.code() << Rm_offset;
+ }
+
+ static Instr RmNot31(CPURegister rm) {
+ VIXL_ASSERT(rm.code() != kSPRegInternalCode);
+ VIXL_ASSERT(!rm.IsZero());
+ return Rm(rm);
+ }
+
+ static Instr Ra(CPURegister ra) {
+ VIXL_ASSERT(ra.code() != kSPRegInternalCode);
+ return ra.code() << Ra_offset;
+ }
+
+ static Instr Rt(CPURegister rt) {
+ VIXL_ASSERT(rt.code() != kSPRegInternalCode);
+ return rt.code() << Rt_offset;
+ }
+
+ static Instr Rt2(CPURegister rt2) {
+ VIXL_ASSERT(rt2.code() != kSPRegInternalCode);
+ return rt2.code() << Rt2_offset;
+ }
+
+ static Instr Rs(CPURegister rs) {
+ VIXL_ASSERT(rs.code() != kSPRegInternalCode);
+ return rs.code() << Rs_offset;
+ }
+
+ // These encoding functions allow the stack pointer to be encoded, and
+ // disallow the zero register.
+ static Instr RdSP(Register rd) {
+ VIXL_ASSERT(!rd.IsZero());
+ return (rd.code() & kRegCodeMask) << Rd_offset;
+ }
+
+ static Instr RnSP(Register rn) {
+ VIXL_ASSERT(!rn.IsZero());
+ return (rn.code() & kRegCodeMask) << Rn_offset;
+ }
+
+ // Flags encoding.
+ static Instr Flags(FlagsUpdate S) {
+ if (S == SetFlags) {
+ return 1 << FlagsUpdate_offset;
+ } else if (S == LeaveFlags) {
+ return 0 << FlagsUpdate_offset;
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ static Instr Cond(Condition cond) {
+ return cond << Condition_offset;
+ }
+
+ // PC-relative address encoding.
+ static Instr ImmPCRelAddress(int imm21) {
+ VIXL_ASSERT(IsInt21(imm21));
+ Instr imm = static_cast<Instr>(TruncateToUint21(imm21));
+ Instr immhi = (imm >> ImmPCRelLo_width) << ImmPCRelHi_offset;
+ Instr immlo = imm << ImmPCRelLo_offset;
+ return (immhi & ImmPCRelHi_mask) | (immlo & ImmPCRelLo_mask);
+ }
+
+ // Branch encoding.
+ static Instr ImmUncondBranch(int imm26) {
+ VIXL_ASSERT(IsInt26(imm26));
+ return TruncateToUint26(imm26) << ImmUncondBranch_offset;
+ }
+
+ static Instr ImmCondBranch(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmCondBranch_offset;
+ }
+
+ static Instr ImmCmpBranch(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmCmpBranch_offset;
+ }
+
+ static Instr ImmTestBranch(int imm14) {
+ VIXL_ASSERT(IsInt14(imm14));
+ return TruncateToUint14(imm14) << ImmTestBranch_offset;
+ }
+
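+  // The 6-bit bit position used by tbz/tbnz is split across the instruction:
+  // bit 5 of bit_pos lands in the b5 field (instruction bit 31) and bits 4:0
+  // land in the b40 field (bits 23:19). For example, a bit position of 33
+  // (0b100001) sets b5 and stores 1 in b40.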
+ static Instr ImmTestBranchBit(unsigned bit_pos) {
+ VIXL_ASSERT(IsUint6(bit_pos));
+ // Subtract five from the shift offset, as we need bit 5 from bit_pos.
+ unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
+ unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
+ b5 &= ImmTestBranchBit5_mask;
+ b40 &= ImmTestBranchBit40_mask;
+ return b5 | b40;
+ }
+
+ // Data Processing encoding.
+ static Instr SF(Register rd) {
+ return rd.Is64Bits() ? SixtyFourBits : ThirtyTwoBits;
+ }
+
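+  // An add/sub immediate is either a plain 12-bit value or a 12-bit value
+  // shifted left by 12. For example, ImmAddSub(0x123) encodes 0x123 with no
+  // shift, while ImmAddSub(0x123000) encodes 0x123 and sets the shift bit.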
+ static Instr ImmAddSub(int imm) {
+ VIXL_ASSERT(IsImmAddSub(imm));
+ if (IsUint12(imm)) { // No shift required.
+ imm <<= ImmAddSub_offset;
+ } else {
+ imm = ((imm >> 12) << ImmAddSub_offset) | (1 << ShiftAddSub_offset);
+ }
+ return imm;
+ }
+
+ static Instr ImmS(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(imms)) ||
+ ((reg_size == kWRegSize) && IsUint5(imms)));
+ USE(reg_size);
+ return imms << ImmS_offset;
+ }
+
+ static Instr ImmR(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+ ((reg_size == kWRegSize) && IsUint5(immr)));
+ USE(reg_size);
+ VIXL_ASSERT(IsUint6(immr));
+ return immr << ImmR_offset;
+ }
+
+ static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(IsUint6(imms));
+ VIXL_ASSERT((reg_size == kXRegSize) || IsUint6(imms + 3));
+ USE(reg_size);
+ return imms << ImmSetBits_offset;
+ }
+
+ static Instr ImmRotate(unsigned immr, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT(((reg_size == kXRegSize) && IsUint6(immr)) ||
+ ((reg_size == kWRegSize) && IsUint5(immr)));
+ USE(reg_size);
+ return immr << ImmRotate_offset;
+ }
+
+ static Instr ImmLLiteral(int imm19) {
+ VIXL_ASSERT(IsInt19(imm19));
+ return TruncateToUint19(imm19) << ImmLLiteral_offset;
+ }
+
+ static Instr BitN(unsigned bitn, unsigned reg_size) {
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
+ USE(reg_size);
+ return bitn << BitN_offset;
+ }
+
+ static Instr ShiftDP(Shift shift) {
+ VIXL_ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+ return shift << ShiftDP_offset;
+ }
+
+ static Instr ImmDPShift(unsigned amount) {
+ VIXL_ASSERT(IsUint6(amount));
+ return amount << ImmDPShift_offset;
+ }
+
+ static Instr ExtendMode(Extend extend) {
+ return extend << ExtendMode_offset;
+ }
+
+ static Instr ImmExtendShift(unsigned left_shift) {
+ VIXL_ASSERT(left_shift <= 4);
+ return left_shift << ImmExtendShift_offset;
+ }
+
+ static Instr ImmCondCmp(unsigned imm) {
+ VIXL_ASSERT(IsUint5(imm));
+ return imm << ImmCondCmp_offset;
+ }
+
+ static Instr Nzcv(StatusFlags nzcv) {
+ return ((nzcv >> Flags_offset) & 0xf) << Nzcv_offset;
+ }
+
+ // MemOperand offset encoding.
+ static Instr ImmLSUnsigned(int imm12) {
+ VIXL_ASSERT(IsUint12(imm12));
+ return imm12 << ImmLSUnsigned_offset;
+ }
+
+ static Instr ImmLS(int imm9) {
+ VIXL_ASSERT(IsInt9(imm9));
+ return TruncateToUint9(imm9) << ImmLS_offset;
+ }
+
+ static Instr ImmLSPair(int imm7, unsigned access_size) {
+ VIXL_ASSERT(((imm7 >> access_size) << access_size) == imm7);
+ int scaled_imm7 = imm7 >> access_size;
+ VIXL_ASSERT(IsInt7(scaled_imm7));
+ return TruncateToUint7(scaled_imm7) << ImmLSPair_offset;
+ }
+
+ static Instr ImmShiftLS(unsigned shift_amount) {
+ VIXL_ASSERT(IsUint1(shift_amount));
+ return shift_amount << ImmShiftLS_offset;
+ }
+
+ static Instr ImmPrefetchOperation(int imm5) {
+ VIXL_ASSERT(IsUint5(imm5));
+ return imm5 << ImmPrefetchOperation_offset;
+ }
+
+ static Instr ImmException(int imm16) {
+ VIXL_ASSERT(IsUint16(imm16));
+ return imm16 << ImmException_offset;
+ }
+
+ static Instr ImmSystemRegister(int imm15) {
+ VIXL_ASSERT(IsUint15(imm15));
+ return imm15 << ImmSystemRegister_offset;
+ }
+
+ static Instr ImmHint(int imm7) {
+ VIXL_ASSERT(IsUint7(imm7));
+ return imm7 << ImmHint_offset;
+ }
+
+ static Instr CRm(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << CRm_offset;
+ }
+
+ static Instr CRn(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << CRn_offset;
+ }
+
+ static Instr SysOp(int imm14) {
+ VIXL_ASSERT(IsUint14(imm14));
+ return imm14 << SysOp_offset;
+ }
+
+ static Instr ImmSysOp1(int imm3) {
+ VIXL_ASSERT(IsUint3(imm3));
+ return imm3 << SysOp1_offset;
+ }
+
+ static Instr ImmSysOp2(int imm3) {
+ VIXL_ASSERT(IsUint3(imm3));
+ return imm3 << SysOp2_offset;
+ }
+
+ static Instr ImmBarrierDomain(int imm2) {
+ VIXL_ASSERT(IsUint2(imm2));
+ return imm2 << ImmBarrierDomain_offset;
+ }
+
+ static Instr ImmBarrierType(int imm2) {
+ VIXL_ASSERT(IsUint2(imm2));
+ return imm2 << ImmBarrierType_offset;
+ }
+
+ // Move immediates encoding.
+ static Instr ImmMoveWide(uint64_t imm) {
+ VIXL_ASSERT(IsUint16(imm));
+ return static_cast<Instr>(imm << ImmMoveWide_offset);
+ }
+
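+  // The two-bit hw field selects a left shift of 0, 16, 32 or 48 bits, so the
+  // value passed here is the shift amount divided by 16.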
+ static Instr ShiftMoveWide(int64_t shift) {
+ VIXL_ASSERT(IsUint2(shift));
+ return static_cast<Instr>(shift << ShiftMoveWide_offset);
+ }
+
+ // FP Immediates.
+ static Instr ImmFP32(float imm);
+ static Instr ImmFP64(double imm);
+
+ // FP register type.
+ static Instr FPType(FPRegister fd) {
+ return fd.Is64Bits() ? FP64 : FP32;
+ }
+
+ static Instr FPScale(unsigned scale) {
+ VIXL_ASSERT(IsUint6(scale));
+ return scale << FPScale_offset;
+ }
+
+ // Immediate field checking helpers.
+ static bool IsImmAddSub(int64_t immediate);
+ static bool IsImmConditionalCompare(int64_t immediate);
+ static bool IsImmFP32(float imm);
+ static bool IsImmFP64(double imm);
+ static bool IsImmLogical(uint64_t value,
+ unsigned width,
+ unsigned* n = NULL,
+ unsigned* imm_s = NULL,
+ unsigned* imm_r = NULL);
+ static bool IsImmLSPair(int64_t offset, unsigned access_size);
+ static bool IsImmLSScaled(int64_t offset, unsigned access_size);
+ static bool IsImmLSUnscaled(int64_t offset);
+ static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+ static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
+ // Instruction bits for vector format in data processing operations.
+ static Instr VFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 2: return NEON_2S;
+ case 4: return NEON_4H;
+ case 8: return NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return NEON_2D;
+ case 4: return NEON_4S;
+ case 8: return NEON_8H;
+ case 16: return NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for vector format in floating point data processing
+ // operations.
+ static Instr FPFormat(VRegister vd) {
+ if (vd.lanes() == 1) {
+ // Floating point scalar formats.
+ VIXL_ASSERT(vd.Is32Bits() || vd.Is64Bits());
+ return vd.Is64Bits() ? FP64 : FP32;
+ }
+
+ // Two lane floating point vector formats.
+ if (vd.lanes() == 2) {
+ VIXL_ASSERT(vd.Is64Bits() || vd.Is128Bits());
+ return vd.Is128Bits() ? NEON_FP_2D : NEON_FP_2S;
+ }
+
+ // Four lane floating point vector format.
+ VIXL_ASSERT((vd.lanes() == 4) && vd.Is128Bits());
+ return NEON_FP_4S;
+ }
+
+ // Instruction bits for vector format in load and store operations.
+ static Instr LSVFormat(VRegister vd) {
+ if (vd.Is64Bits()) {
+ switch (vd.lanes()) {
+ case 1: return LS_NEON_1D;
+ case 2: return LS_NEON_2S;
+ case 4: return LS_NEON_4H;
+ case 8: return LS_NEON_8B;
+ default: return 0xffffffff;
+ }
+ } else {
+ VIXL_ASSERT(vd.Is128Bits());
+ switch (vd.lanes()) {
+ case 2: return LS_NEON_2D;
+ case 4: return LS_NEON_4S;
+ case 8: return LS_NEON_8H;
+ case 16: return LS_NEON_16B;
+ default: return 0xffffffff;
+ }
+ }
+ }
+
+ // Instruction bits for scalar format in data processing operations.
+ static Instr SFormat(VRegister vd) {
+ VIXL_ASSERT(vd.lanes() == 1);
+ switch (vd.SizeInBytes()) {
+ case 1: return NEON_B;
+ case 2: return NEON_H;
+ case 4: return NEON_S;
+ case 8: return NEON_D;
+ default: return 0xffffffff;
+ }
+ }
+
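+  // Encode a lane index into the H, L and M bits. For example, a 3-bit index
+  // of 5 (0b101) gives h = 1, l = 0 and m = 1.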
+ static Instr ImmNEONHLM(int index, int num_bits) {
+ int h, l, m;
+ if (num_bits == 3) {
+ VIXL_ASSERT(IsUint3(index));
+ h = (index >> 2) & 1;
+ l = (index >> 1) & 1;
+ m = (index >> 0) & 1;
+ } else if (num_bits == 2) {
+ VIXL_ASSERT(IsUint2(index));
+ h = (index >> 1) & 1;
+ l = (index >> 0) & 1;
+ m = 0;
+ } else {
+ VIXL_ASSERT(IsUint1(index) && (num_bits == 1));
+ h = (index >> 0) & 1;
+ l = 0;
+ m = 0;
+ }
+ return (h << NEONH_offset) | (l << NEONL_offset) | (m << NEONM_offset);
+ }
+
+ static Instr ImmNEONExt(int imm4) {
+ VIXL_ASSERT(IsUint4(imm4));
+ return imm4 << ImmNEONExt_offset;
+ }
+
+ static Instr ImmNEON5(Instr format, int index) {
+ VIXL_ASSERT(IsUint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm5 = (index << (s + 1)) | (1 << s);
+ return imm5 << ImmNEON5_offset;
+ }
+
+ static Instr ImmNEON4(Instr format, int index) {
+ VIXL_ASSERT(IsUint4(index));
+ int s = LaneSizeInBytesLog2FromFormat(static_cast<VectorFormat>(format));
+ int imm4 = index << s;
+ return imm4 << ImmNEON4_offset;
+ }
+
+ static Instr ImmNEONabcdefgh(int imm8) {
+ VIXL_ASSERT(IsUint8(imm8));
+ Instr instr;
+ instr = ((imm8 >> 5) & 7) << ImmNEONabc_offset;
+ instr |= (imm8 & 0x1f) << ImmNEONdefgh_offset;
+ return instr;
+ }
+
+ static Instr NEONCmode(int cmode) {
+ VIXL_ASSERT(IsUint4(cmode));
+ return cmode << NEONCmode_offset;
+ }
+
+ static Instr NEONModImmOp(int op) {
+ VIXL_ASSERT(IsUint1(op));
+ return op << NEONModImmOp_offset;
+ }
+
+ size_t size() const {
+ return SizeOfCodeGenerated();
+ }
+
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ PositionIndependentCodeOption pic() const {
+ return pic_;
+ }
+
+ CPUFeatures* GetCPUFeatures() { return &cpu_features_; }
+
+ void SetCPUFeatures(const CPUFeatures& cpu_features) {
+ cpu_features_ = cpu_features;
+ }
+
+ bool AllowPageOffsetDependentCode() const {
+ return (pic() == PageOffsetDependentCode) ||
+ (pic() == PositionDependentCode);
+ }
+
+ static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+ return reg.Is64Bits() ? xzr : wzr;
+ }
+
+
+ protected:
+ void LoadStore(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ void LoadStorePair(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+ void LoadStoreStruct(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreMultiStructOp op);
+ void LoadStoreStruct1(const VRegister& vt,
+ int reg_count,
+ const MemOperand& addr);
+ void LoadStoreStructSingle(const VRegister& vt,
+ uint32_t lane,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructSingleAllLanes(const VRegister& vt,
+ const MemOperand& addr,
+ NEONLoadStoreSingleStructOp op);
+ void LoadStoreStructVerify(const VRegister& vt,
+ const MemOperand& addr,
+ Instr op);
+
+ void Prefetch(PrefetchOperation op,
+ const MemOperand& addr,
+ LoadStoreScalingOption option = PreferScaledOffset);
+
+ BufferOffset Logical(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+ BufferOffset LogicalImmediate(const Register& rd,
+ const Register& rn,
+ unsigned n,
+ unsigned imm_s,
+ unsigned imm_r,
+ LogicalOp op);
+
+ void ConditionalCompare(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+
+ void AddSubWithCarry(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+
+ // Functions for emulating operands not directly supported by the instruction
+ // set.
+ void EmitShift(const Register& rd,
+ const Register& rn,
+ Shift shift,
+ unsigned amount);
+ void EmitExtendShift(const Register& rd,
+ const Register& rn,
+ Extend extend,
+ unsigned left_shift);
+
+ void AddSub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ void NEONTable(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONTableOp op);
+
+ // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
+ // registers. Only simple loads are supported; sign- and zero-extension (such
+ // as in LDPSW_x or LDRB_w) are not supported.
+ static LoadStoreOp LoadOpFor(const CPURegister& rt);
+ static LoadStorePairOp LoadPairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStoreOp StoreOpFor(const CPURegister& rt);
+ static LoadStorePairOp StorePairOpFor(const CPURegister& rt,
+ const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp LoadPairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadStorePairNonTemporalOp StorePairNonTemporalOpFor(
+ const CPURegister& rt, const CPURegister& rt2);
+ static LoadLiteralOp LoadLiteralOpFor(const CPURegister& rt);
+
+ // Convenience pass-through for CPU feature checks.
+ bool CPUHas(CPUFeatures::Feature feature0,
+ CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature3 = CPUFeatures::kNone) const {
+ return cpu_features_.Has(feature0, feature1, feature2, feature3);
+ }
+
+ // Determine whether the target CPU has the specified registers, based on the
+ // currently-enabled CPU features. Presence of a register does not imply
+ // support for arbitrary operations on it. For example, CPUs with FP have H
+ // registers, but most half-precision operations require the FPHalf feature.
+ //
+ // These are used to check CPU features in loads and stores that have the same
+ // entry point for both integer and FP registers.
+ bool CPUHas(const CPURegister& rt) const;
+ bool CPUHas(const CPURegister& rt, const CPURegister& rt2) const;
+
+ bool CPUHas(SystemRegister sysreg) const;
+
+ private:
+ static uint32_t FP32ToImm8(float imm);
+ static uint32_t FP64ToImm8(double imm);
+
+ // Instruction helpers.
+ void MoveWide(const Register& rd,
+ uint64_t imm,
+ int shift,
+ MoveWideImmediateOp mov_op);
+ BufferOffset DataProcShiftedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void DataProcExtendedRegister(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ Instr op);
+ void LoadStorePairNonTemporal(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairNonTemporalOp op);
+ void LoadLiteral(const CPURegister& rt, uint64_t imm, LoadLiteralOp op);
+ void ConditionalSelect(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond,
+ ConditionalSelectOp op);
+ void DataProcessing1Source(const Register& rd,
+ const Register& rn,
+ DataProcessing1SourceOp op);
+ void DataProcessing3Source(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra,
+ DataProcessing3SourceOp op);
+ void FPDataProcessing1Source(const VRegister& fd,
+ const VRegister& fn,
+ FPDataProcessing1SourceOp op);
+ void FPDataProcessing3Source(const VRegister& fd,
+ const VRegister& fn,
+ const VRegister& fm,
+ const VRegister& fa,
+ FPDataProcessing3SourceOp op);
+ void NEONAcrossLanesL(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONAcrossLanes(const VRegister& vd,
+ const VRegister& vn,
+ NEONAcrossLanesOp op);
+ void NEONModifiedImmShiftLsl(const VRegister& vd,
+ const int imm8,
+ const int left_shift,
+ NEONModifiedImmediateOp op);
+ void NEONModifiedImmShiftMsl(const VRegister& vd,
+ const int imm8,
+ const int shift_amount,
+ NEONModifiedImmediateOp op);
+ void NEONFP2Same(const VRegister& vd,
+ const VRegister& vn,
+ Instr vop);
+ void NEON3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3SameOp vop);
+ void NEONFP3Same(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Instr op);
+ void NEON3DifferentL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentW(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEON3DifferentHN(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEON3DifferentOp vop);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ double value = 0.0);
+ void NEON2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop,
+ int value = 0);
+ void NEONFP2RegMisc(const VRegister& vd,
+ const VRegister& vn,
+ Instr op);
+ void NEONAddlp(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp op);
+ void NEONPerm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ NEONPermOp op);
+ void NEONFPByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElement(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONByElementL(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int vm_index,
+ NEONByIndexedElementOp op);
+ void NEONShiftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ NEONShiftImmediateOp op,
+ int immh_immb);
+ void NEONShiftLeftImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftRightImmediate(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateL(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONShiftImmediateN(const VRegister& vd,
+ const VRegister& vn,
+ int shift,
+ NEONShiftImmediateOp op);
+ void NEONXtn(const VRegister& vd,
+ const VRegister& vn,
+ NEON2RegMiscOp vop);
+
+ Instr LoadStoreStructAddrModeField(const MemOperand& addr);
+
+ // Encode the specified MemOperand for the specified access size and scaling
+ // preference.
+ Instr LoadStoreMemOperand(const MemOperand& addr,
+ unsigned access_size,
+ LoadStoreScalingOption option);
+
+ protected:
+ // Prevent generation of a literal pool for the next |maxInst| instructions.
+ // Guarantees instruction linearity.
+ class AutoBlockLiteralPool {
+ ARMBuffer* armbuffer_;
+
+ public:
+ AutoBlockLiteralPool(Assembler* assembler, size_t maxInst)
+ : armbuffer_(&assembler->armbuffer_) {
+ armbuffer_->enterNoPool(maxInst);
+ }
+ ~AutoBlockLiteralPool() {
+ armbuffer_->leaveNoPool();
+ }
+ };
+
+ protected:
+  PositionIndependentCodeOption pic_;
+
+ CPUFeatures cpu_features_;
+
+#ifdef DEBUG
+ bool finalized_;
+#endif
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
new file mode 100644
index 0000000000..e13eef6135
--- /dev/null
+++ b/js/src/jit/arm64/vixl/CompilerIntrinsics-vixl.h
@@ -0,0 +1,179 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#ifndef VIXL_COMPILER_INTRINSICS_H
+#define VIXL_COMPILER_INTRINSICS_H
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Helper to check whether the version of GCC in use is at least the specified
+// requirement.
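+// For example, GCC_VERSION_OR_NEWER(4, 7, 0) evaluates to true when compiling
+// with GCC 4.7.0 or newer, and expands to 0 when the GCC version macros are
+// not defined at all.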
+#define MAJOR 1000000
+#define MINOR 1000
+#if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR + __GNUC_PATCHLEVEL__) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__)
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) \
+ ((__GNUC__ * MAJOR + __GNUC_MINOR__ * MINOR) >= \
+ ((major) * MAJOR + (minor) * MINOR + (patchlevel)))
+#else
+#define GCC_VERSION_OR_NEWER(major, minor, patchlevel) 0
+#endif
+
+
+#if defined(__clang__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+
+#define COMPILER_HAS_BUILTIN_CLRSB (__has_builtin(__builtin_clrsb))
+#define COMPILER_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
+#define COMPILER_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
+#define COMPILER_HAS_BUILTIN_FFS (__has_builtin(__builtin_ffs))
+#define COMPILER_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+
+#elif defined(__GNUC__) && !defined(VIXL_NO_COMPILER_BUILTINS)
+// The documentation for these builtins is available at:
+// https://gcc.gnu.org/onlinedocs/gcc-$MAJOR.$MINOR.$PATCHLEVEL/gcc//Other-Builtins.html
+
+# define COMPILER_HAS_BUILTIN_CLRSB (GCC_VERSION_OR_NEWER(4, 7, 0))
+# define COMPILER_HAS_BUILTIN_CLZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_CTZ (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_FFS (GCC_VERSION_OR_NEWER(3, 4, 0))
+# define COMPILER_HAS_BUILTIN_POPCOUNT (GCC_VERSION_OR_NEWER(3, 4, 0))
+
+#else
+// One can define VIXL_NO_COMPILER_BUILTINS to force using the manually
+// implemented C++ methods.
+
+#define COMPILER_HAS_BUILTIN_BSWAP false
+#define COMPILER_HAS_BUILTIN_CLRSB false
+#define COMPILER_HAS_BUILTIN_CLZ false
+#define COMPILER_HAS_BUILTIN_CTZ false
+#define COMPILER_HAS_BUILTIN_FFS false
+#define COMPILER_HAS_BUILTIN_POPCOUNT false
+
+#endif
+
+
+template<typename V>
+inline bool IsPowerOf2(V value) {
+ return (value != 0) && ((value & (value - 1)) == 0);
+}
+
+
+// Implementation of intrinsic functions.
+// TODO: The implementations could be improved for sizes other than 32-bit and
+// 64-bit: we could mask the values and call the appropriate builtin.
+
+
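+// For example, CountLeadingZeros(UINT32_C(1)) returns 31, whether the compiler
+// builtin or the mozilla:: fallback is used.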
+template<typename V>
+inline int CountLeadingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_clz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_clzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountLeadingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountLeadingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+
+template<typename V>
+inline int CountLeadingSignBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CLRSB
+ if (width == 32) {
+ return __builtin_clrsb(value);
+ } else if (width == 64) {
+ return __builtin_clrsbll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ VIXL_ASSERT(IsPowerOf2(width) && (width <= 64));
+ if (value >= 0) {
+ return CountLeadingZeros(value, width) - 1;
+ } else {
+ return CountLeadingZeros(~value, width) - 1;
+ }
+#endif
+}
+
+
+template<typename V>
+inline int CountSetBits(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_POPCOUNT
+ if (width == 32) {
+ return __builtin_popcount(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return __builtin_popcountll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountPopulation32(value);
+ } else if (width == 64) {
+ return mozilla::CountPopulation64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+
+template<typename V>
+inline int CountTrailingZeros(V value, int width = (sizeof(V) * 8)) {
+#if COMPILER_HAS_BUILTIN_CTZ
+ if (width == 32) {
+ return (value == 0) ? 32 : __builtin_ctz(static_cast<unsigned>(value));
+ } else if (width == 64) {
+ return (value == 0) ? 64 : __builtin_ctzll(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#else
+ if (width == 32) {
+ return mozilla::CountTrailingZeroes32(value);
+ } else if (width == 64) {
+ return mozilla::CountTrailingZeroes64(value);
+ }
+ MOZ_CRASH("Unhandled width.");
+#endif
+}
+
+} // namespace vixl
+
+#endif // VIXL_COMPILER_INTRINSICS_H
+
diff --git a/js/src/jit/arm64/vixl/Constants-vixl.h b/js/src/jit/arm64/vixl/Constants-vixl.h
new file mode 100644
index 0000000000..2c174e61a5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Constants-vixl.h
@@ -0,0 +1,2694 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_CONSTANTS_A64_H_
+#define VIXL_A64_CONSTANTS_A64_H_
+
+#include <stdint.h>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Supervisor Call (svc) specific support.
+//
+// The SVC instruction encodes an optional 16-bit immediate value.
+// The simulator understands the codes below.
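+//
+// For example, when simulated code needs to call into native host code it
+// executes `svc #0x10` (kCallRtRedirected); the simulator intercepts that
+// immediate and performs the call in host C++ instead of continuing to
+// emulate ARM64 instructions.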
+enum SVCSimulatorCodes {
+ kCallRtRedirected = 0x10, // Transition to x86_64 C code.
+ kMarkStackPointer = 0x11, // Push the current SP on a special Simulator stack.
+ kCheckStackPointer = 0x12 // Pop from the special Simulator stack and compare to SP.
+};
+
+const unsigned kNumberOfRegisters = 32;
+const unsigned kNumberOfVRegisters = 32;
+const unsigned kNumberOfFPRegisters = kNumberOfVRegisters;
+// Callee saved registers are x21-x30(lr).
+const int kNumberOfCalleeSavedRegisters = 10;
+const int kFirstCalleeSavedRegisterIndex = 21;
+// Callee saved FP registers are d8-d15. Note that the high parts of v8-v15 are
+// still caller-saved.
+const int kNumberOfCalleeSavedFPRegisters = 8;
+const int kFirstCalleeSavedFPRegisterIndex = 8;
+
+#define REGISTER_CODE_LIST(R) \
+R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
+R(8) R(9) R(10) R(11) R(12) R(13) R(14) R(15) \
+R(16) R(17) R(18) R(19) R(20) R(21) R(22) R(23) \
+R(24) R(25) R(26) R(27) R(28) R(29) R(30) R(31)
+
+#define INSTRUCTION_FIELDS_LIST(V_) \
+/* Register fields */ \
+V_(Rd, 4, 0, Bits) /* Destination register. */ \
+V_(Rn, 9, 5, Bits) /* First source register. */ \
+V_(Rm, 20, 16, Bits) /* Second source register. */ \
+V_(Ra, 14, 10, Bits) /* Third source register. */ \
+V_(Rt, 4, 0, Bits) /* Load/store register. */ \
+V_(Rt2, 14, 10, Bits) /* Load/store second register. */ \
+V_(Rs, 20, 16, Bits) /* Exclusive access status. */ \
+ \
+/* Common bits */ \
+V_(SixtyFourBits, 31, 31, Bits) \
+V_(FlagsUpdate, 29, 29, Bits) \
+ \
+/* PC relative addressing */ \
+V_(ImmPCRelHi, 23, 5, SignedBits) \
+V_(ImmPCRelLo, 30, 29, Bits) \
+ \
+/* Add/subtract/logical shift register */ \
+V_(ShiftDP, 23, 22, Bits) \
+V_(ImmDPShift, 15, 10, Bits) \
+ \
+/* Add/subtract immediate */ \
+V_(ImmAddSub, 21, 10, Bits) \
+V_(ShiftAddSub, 23, 22, Bits) \
+ \
+/* Add/subtract extend */                                                   \
+V_(ImmExtendShift, 12, 10, Bits) \
+V_(ExtendMode, 15, 13, Bits) \
+ \
+/* Move wide */ \
+V_(ImmMoveWide, 20, 5, Bits) \
+V_(ShiftMoveWide, 22, 21, Bits) \
+ \
+/* Logical immediate, bitfield and extract */ \
+V_(BitN, 22, 22, Bits) \
+V_(ImmRotate, 21, 16, Bits) \
+V_(ImmSetBits, 15, 10, Bits) \
+V_(ImmR, 21, 16, Bits) \
+V_(ImmS, 15, 10, Bits) \
+ \
+/* Test and branch immediate */ \
+V_(ImmTestBranch, 18, 5, SignedBits) \
+V_(ImmTestBranchBit40, 23, 19, Bits) \
+V_(ImmTestBranchBit5, 31, 31, Bits) \
+ \
+/* Conditionals */ \
+V_(Condition, 15, 12, Bits) \
+V_(ConditionBranch, 3, 0, Bits) \
+V_(Nzcv, 3, 0, Bits) \
+V_(ImmCondCmp, 20, 16, Bits) \
+V_(ImmCondBranch, 23, 5, SignedBits) \
+ \
+/* Floating point */ \
+V_(FPType, 23, 22, Bits) \
+V_(ImmFP, 20, 13, Bits) \
+V_(FPScale, 15, 10, Bits) \
+ \
+/* Load Store */ \
+V_(ImmLS, 20, 12, SignedBits) \
+V_(ImmLSUnsigned, 21, 10, Bits) \
+V_(ImmLSPair, 21, 15, SignedBits) \
+V_(ImmShiftLS, 12, 12, Bits) \
+V_(LSOpc, 23, 22, Bits) \
+V_(LSVector, 26, 26, Bits) \
+V_(LSSize, 31, 30, Bits) \
+V_(ImmPrefetchOperation, 4, 0, Bits) \
+V_(PrefetchHint, 4, 3, Bits) \
+V_(PrefetchTarget, 2, 1, Bits) \
+V_(PrefetchStream, 0, 0, Bits) \
+ \
+/* Other immediates */ \
+V_(ImmUncondBranch, 25, 0, SignedBits) \
+V_(ImmCmpBranch, 23, 5, SignedBits) \
+V_(ImmLLiteral, 23, 5, SignedBits) \
+V_(ImmException, 20, 5, Bits) \
+V_(ImmHint, 11, 5, Bits) \
+V_(ImmBarrierDomain, 11, 10, Bits) \
+V_(ImmBarrierType, 9, 8, Bits) \
+ \
+/* System (MRS, MSR, SYS) */ \
+V_(ImmSystemRegister, 19, 5, Bits) \
+V_(SysO0, 19, 19, Bits) \
+V_(SysOp, 18, 5, Bits) \
+V_(SysOp1, 18, 16, Bits) \
+V_(SysOp2, 7, 5, Bits) \
+V_(CRn, 15, 12, Bits) \
+V_(CRm, 11, 8, Bits) \
+ \
+/* Load-/store-exclusive */ \
+V_(LdStXLoad, 22, 22, Bits) \
+V_(LdStXNotExclusive, 23, 23, Bits) \
+V_(LdStXAcquireRelease, 15, 15, Bits) \
+V_(LdStXSizeLog2, 31, 30, Bits) \
+V_(LdStXPair, 21, 21, Bits) \
+ \
+/* NEON generic fields */ \
+V_(NEONQ, 30, 30, Bits) \
+V_(NEONSize, 23, 22, Bits) \
+V_(NEONLSSize, 11, 10, Bits) \
+V_(NEONS, 12, 12, Bits) \
+V_(NEONL, 21, 21, Bits) \
+V_(NEONM, 20, 20, Bits) \
+V_(NEONH, 11, 11, Bits) \
+V_(ImmNEONExt, 14, 11, Bits) \
+V_(ImmNEON5, 20, 16, Bits) \
+V_(ImmNEON4, 14, 11, Bits) \
+ \
+/* NEON Modified Immediate fields */ \
+V_(ImmNEONabc, 18, 16, Bits) \
+V_(ImmNEONdefgh, 9, 5, Bits) \
+V_(NEONModImmOp, 29, 29, Bits) \
+V_(NEONCmode, 15, 12, Bits) \
+ \
+/* NEON Shift Immediate fields */ \
+V_(ImmNEONImmhImmb, 22, 16, Bits) \
+V_(ImmNEONImmh, 22, 19, Bits) \
+V_(ImmNEONImmb, 18, 16, Bits)
+
+#define SYSTEM_REGISTER_FIELDS_LIST(V_, M_) \
+/* NZCV */ \
+V_(Flags, 31, 28, Bits) \
+V_(N, 31, 31, Bits) \
+V_(Z, 30, 30, Bits) \
+V_(C, 29, 29, Bits) \
+V_(V, 28, 28, Bits) \
+M_(NZCV, Flags_mask) \
+/* FPCR */ \
+V_(AHP, 26, 26, Bits) \
+V_(DN, 25, 25, Bits) \
+V_(FZ, 24, 24, Bits) \
+V_(RMode, 23, 22, Bits) \
+M_(FPCR, AHP_mask | DN_mask | FZ_mask | RMode_mask)
+
+// Fields offsets.
+#define DECLARE_FIELDS_OFFSETS(Name, HighBit, LowBit, X) \
+const int Name##_offset = LowBit; \
+const int Name##_width = HighBit - LowBit + 1; \
+const uint32_t Name##_mask = ((1 << Name##_width) - 1) << LowBit;
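+// For example, the Rd entry (bits 4:0) expands to Rd_offset = 0, Rd_width = 5
+// and Rd_mask = 0x0000001f.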
+#define NOTHING(A, B)
+INSTRUCTION_FIELDS_LIST(DECLARE_FIELDS_OFFSETS)
+SYSTEM_REGISTER_FIELDS_LIST(DECLARE_FIELDS_OFFSETS, NOTHING)
+#undef NOTHING
+#undef DECLARE_FIELDS_OFFSETS
+
+// ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST), formed
+// from ImmPCRelLo and ImmPCRelHi.
+const int ImmPCRel_mask = ImmPCRelLo_mask | ImmPCRelHi_mask;
+
+// Condition codes.
+enum Condition {
+ eq = 0, // Z set Equal.
+ ne = 1, // Z clear Not equal.
+ cs = 2, // C set Carry set.
+ cc = 3, // C clear Carry clear.
+ mi = 4, // N set Negative.
+ pl = 5, // N clear Positive or zero.
+ vs = 6, // V set Overflow.
+ vc = 7, // V clear No overflow.
+ hi = 8, // C set, Z clear Unsigned higher.
+ ls = 9, // C clear or Z set Unsigned lower or same.
+ ge = 10, // N == V Greater or equal.
+ lt = 11, // N != V Less than.
+ gt = 12, // Z clear, N == V Greater than.
+  le = 13, // Z set or N != V       Less than or equal.
+ al = 14, // Always.
+ nv = 15, // Behaves as always/al.
+
+ // Aliases.
+ hs = cs, // C set Unsigned higher or same.
+ lo = cc, // C clear Unsigned lower.
+
+ // Mozilla expanded aliases.
+ Equal = 0, Zero = 0,
+ NotEqual = 1, NonZero = 1,
+ AboveOrEqual = 2, CarrySet = 2,
+ Below = 3, CarryClear = 3,
+ Signed = 4,
+ NotSigned = 5,
+ Overflow = 6,
+ NoOverflow = 7,
+ Above = 8,
+ BelowOrEqual = 9,
+ GreaterThanOrEqual_ = 10,
+ LessThan_ = 11,
+ GreaterThan_ = 12,
+ LessThanOrEqual_ = 13,
+ Always = 14,
+ Never = 15
+};
+
+inline Condition InvertCondition(Condition cond) {
+ // Conditions al and nv behave identically, as "always true". They can't be
+ // inverted, because there is no "always false" condition.
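+  // For example, InvertCondition(eq) is ne and InvertCondition(lo) is hs.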
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ return static_cast<Condition>(cond ^ 1);
+}
+
+enum FPTrapFlags {
+ EnableTrap = 1,
+ DisableTrap = 0
+};
+
+enum FlagsUpdate {
+ SetFlags = 1,
+ LeaveFlags = 0
+};
+
+enum StatusFlags {
+ NoFlag = 0,
+
+ // Derive the flag combinations from the system register bit descriptions.
+ NFlag = N_mask,
+ ZFlag = Z_mask,
+ CFlag = C_mask,
+ VFlag = V_mask,
+ NZFlag = NFlag | ZFlag,
+ NCFlag = NFlag | CFlag,
+ NVFlag = NFlag | VFlag,
+ ZCFlag = ZFlag | CFlag,
+ ZVFlag = ZFlag | VFlag,
+ CVFlag = CFlag | VFlag,
+ NZCFlag = NFlag | ZFlag | CFlag,
+ NZVFlag = NFlag | ZFlag | VFlag,
+ NCVFlag = NFlag | CFlag | VFlag,
+ ZCVFlag = ZFlag | CFlag | VFlag,
+ NZCVFlag = NFlag | ZFlag | CFlag | VFlag,
+
+ // Floating-point comparison results.
+ FPEqualFlag = ZCFlag,
+ FPLessThanFlag = NFlag,
+ FPGreaterThanFlag = CFlag,
+ FPUnorderedFlag = CVFlag
+};
+
+enum Shift {
+ NO_SHIFT = -1,
+ LSL = 0x0,
+ LSR = 0x1,
+ ASR = 0x2,
+ ROR = 0x3,
+ MSL = 0x4
+};
+
+enum Extend {
+ NO_EXTEND = -1,
+ UXTB = 0,
+ UXTH = 1,
+ UXTW = 2,
+ UXTX = 3,
+ SXTB = 4,
+ SXTH = 5,
+ SXTW = 6,
+ SXTX = 7
+};
+
+enum SystemHint {
+ NOP = 0,
+ YIELD = 1,
+ WFE = 2,
+ WFI = 3,
+ SEV = 4,
+ SEVL = 5,
+ ESB = 16,
+ CSDB = 20,
+ BTI = 32,
+ BTI_c = 34,
+ BTI_j = 36,
+ BTI_jc = 38
+};
+
+enum BranchTargetIdentifier {
+ EmitBTI_none = NOP,
+ EmitBTI = BTI,
+ EmitBTI_c = BTI_c,
+ EmitBTI_j = BTI_j,
+ EmitBTI_jc = BTI_jc,
+
+ // These correspond to the values of the CRm:op2 fields in the equivalent HINT
+ // instruction.
+ EmitPACIASP = 25,
+ EmitPACIBSP = 27
+};
+
+enum BarrierDomain {
+ OuterShareable = 0,
+ NonShareable = 1,
+ InnerShareable = 2,
+ FullSystem = 3
+};
+
+enum BarrierType {
+ BarrierOther = 0,
+ BarrierReads = 1,
+ BarrierWrites = 2,
+ BarrierAll = 3
+};
+
+enum PrefetchOperation {
+ PLDL1KEEP = 0x00,
+ PLDL1STRM = 0x01,
+ PLDL2KEEP = 0x02,
+ PLDL2STRM = 0x03,
+ PLDL3KEEP = 0x04,
+ PLDL3STRM = 0x05,
+
+ PLIL1KEEP = 0x08,
+ PLIL1STRM = 0x09,
+ PLIL2KEEP = 0x0a,
+ PLIL2STRM = 0x0b,
+ PLIL3KEEP = 0x0c,
+ PLIL3STRM = 0x0d,
+
+ PSTL1KEEP = 0x10,
+ PSTL1STRM = 0x11,
+ PSTL2KEEP = 0x12,
+ PSTL2STRM = 0x13,
+ PSTL3KEEP = 0x14,
+ PSTL3STRM = 0x15
+};
+
+enum BType {
+ // Set when executing any instruction on a guarded page, except those cases
+ // listed below.
+ DefaultBType = 0,
+
+ // Set when an indirect branch is taken from an unguarded page to a guarded
+  // page, or from a guarded page to ip0 or ip1 (x16 or x17), e.g. "br ip0".
+ BranchFromUnguardedOrToIP = 1,
+
+  // Set when an indirect branch and link (call) is taken, e.g. "blr x0".
+ BranchAndLink = 2,
+
+ // Set when an indirect branch is taken from a guarded page to a register
+  // that is not ip0 or ip1 (x16 or x17), e.g. "br x0".
+ BranchFromGuardedNotToIP = 3
+};
+
+template<int op0, int op1, int crn, int crm, int op2>
+class SystemRegisterEncoder {
+ public:
+ static const uint32_t value =
+ ((op0 << SysO0_offset) |
+ (op1 << SysOp1_offset) |
+ (crn << CRn_offset) |
+ (crm << CRm_offset) |
+ (op2 << SysOp2_offset)) >> ImmSystemRegister_offset;
+};
+
+// System/special register names.
+// This information is not encoded as one field but as the concatenation of
+// multiple fields (Op0, Op1, Crn, Crm, Op2).
+enum SystemRegister {
+ NZCV = SystemRegisterEncoder<3, 3, 4, 2, 0>::value,
+ FPCR = SystemRegisterEncoder<3, 3, 4, 4, 0>::value,
+ RNDR = SystemRegisterEncoder<3, 3, 2, 4, 0>::value, // Random number.
+ RNDRRS = SystemRegisterEncoder<3, 3, 2, 4, 1>::value // Reseeded random number.
+};
+
+template<int op1, int crn, int crm, int op2>
+class CacheOpEncoder {
+ public:
+ static const uint32_t value =
+ ((op1 << SysOp1_offset) |
+ (crn << CRn_offset) |
+ (crm << CRm_offset) |
+ (op2 << SysOp2_offset)) >> SysOp_offset;
+};
+
+enum InstructionCacheOp : uint32_t {
+ IVAU = CacheOpEncoder<3, 7, 5, 1>::value
+};
+
+enum DataCacheOp : uint32_t {
+ CVAC = CacheOpEncoder<3, 7, 10, 1>::value,
+ CVAU = CacheOpEncoder<3, 7, 11, 1>::value,
+ CVAP = CacheOpEncoder<3, 7, 12, 1>::value,
+ CVADP = CacheOpEncoder<3, 7, 13, 1>::value,
+ CIVAC = CacheOpEncoder<3, 7, 14, 1>::value,
+ ZVA = CacheOpEncoder<3, 7, 4, 1>::value
+};
+
+// Instruction enumerations.
+//
+// These are the masks that define a class of instructions, and the list of
+// instructions within each class. Each enumeration has a Fixed, FMask and
+// Mask value.
+//
+// Fixed: The fixed bits in this instruction class.
+// FMask: The mask used to extract the fixed bits in the class.
+// Mask: The mask used to identify the instructions within a class.
+//
+// The enumerations can be used like this:
+//
+// VIXL_ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// switch(instr->Mask(PCRelAddressingMask)) {
+// case ADR: Format("adr 'Xd, 'AddrPCRelByte"); break;
+// case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
+// default: printf("Unknown instruction\n");
+// }
+
+
+// Generic fields.
+enum GenericInstrField : uint32_t {
+ SixtyFourBits = 0x80000000,
+ ThirtyTwoBits = 0x00000000,
+
+ FPTypeMask = 0x00C00000,
+ FP16 = 0x00C00000,
+ FP32 = 0x00000000,
+ FP64 = 0x00400000
+};
+
+enum NEONFormatField : uint32_t {
+ NEONFormatFieldMask = 0x40C00000,
+ NEON_Q = 0x40000000,
+ NEON_8B = 0x00000000,
+ NEON_16B = NEON_8B | NEON_Q,
+ NEON_4H = 0x00400000,
+ NEON_8H = NEON_4H | NEON_Q,
+ NEON_2S = 0x00800000,
+ NEON_4S = NEON_2S | NEON_Q,
+ NEON_1D = 0x00C00000,
+ NEON_2D = 0x00C00000 | NEON_Q
+};
+
+enum NEONFPFormatField : uint32_t {
+ NEONFPFormatFieldMask = 0x40400000,
+ NEON_FP_4H = FP16,
+ NEON_FP_2S = FP32,
+ NEON_FP_8H = FP16 | NEON_Q,
+ NEON_FP_4S = FP32 | NEON_Q,
+ NEON_FP_2D = FP64 | NEON_Q
+};
+
+enum NEONLSFormatField : uint32_t {
+ NEONLSFormatFieldMask = 0x40000C00,
+ LS_NEON_8B = 0x00000000,
+ LS_NEON_16B = LS_NEON_8B | NEON_Q,
+ LS_NEON_4H = 0x00000400,
+ LS_NEON_8H = LS_NEON_4H | NEON_Q,
+ LS_NEON_2S = 0x00000800,
+ LS_NEON_4S = LS_NEON_2S | NEON_Q,
+ LS_NEON_1D = 0x00000C00,
+ LS_NEON_2D = LS_NEON_1D | NEON_Q
+};
+
+enum NEONScalarFormatField : uint32_t {
+ NEONScalarFormatFieldMask = 0x00C00000,
+ NEONScalar = 0x10000000,
+ NEON_B = 0x00000000,
+ NEON_H = 0x00400000,
+ NEON_S = 0x00800000,
+ NEON_D = 0x00C00000
+};
+
+// PC relative addressing.
+enum PCRelAddressingOp : uint32_t {
+ PCRelAddressingFixed = 0x10000000,
+ PCRelAddressingFMask = 0x1F000000,
+ PCRelAddressingMask = 0x9F000000,
+ ADR = PCRelAddressingFixed | 0x00000000,
+ ADRP = PCRelAddressingFixed | 0x80000000
+};
+
+// Add/sub (immediate, shifted and extended).
+const int kSFOffset = 31;
+enum AddSubOp : uint32_t {
+ AddSubOpMask = 0x60000000,
+ AddSubSetFlagsBit = 0x20000000,
+ ADD = 0x00000000,
+ ADDS = ADD | AddSubSetFlagsBit,
+ SUB = 0x40000000,
+ SUBS = SUB | AddSubSetFlagsBit
+};
+
+#define ADD_SUB_OP_LIST(V) \
+ V(ADD), \
+ V(ADDS), \
+ V(SUB), \
+ V(SUBS)
+
+enum AddSubImmediateOp : uint32_t {
+ AddSubImmediateFixed = 0x11000000,
+ AddSubImmediateFMask = 0x1F000000,
+ AddSubImmediateMask = 0xFF000000,
+ #define ADD_SUB_IMMEDIATE(A) \
+ A##_w_imm = AddSubImmediateFixed | A, \
+ A##_x_imm = AddSubImmediateFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_IMMEDIATE)
+ #undef ADD_SUB_IMMEDIATE
+};
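+
+// For reference, a sketch of what the X-macro expansion above produces (first
+// pair of enumerators only; the values follow directly from the constants in
+// this file):
+//   ADD_w_imm = AddSubImmediateFixed | ADD                  = 0x11000000
+//   ADD_x_imm = AddSubImmediateFixed | ADD | SixtyFourBits  = 0x91000000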
+
+enum AddSubShiftedOp : uint32_t {
+ AddSubShiftedFixed = 0x0B000000,
+ AddSubShiftedFMask = 0x1F200000,
+ AddSubShiftedMask = 0xFF200000,
+ #define ADD_SUB_SHIFTED(A) \
+ A##_w_shift = AddSubShiftedFixed | A, \
+ A##_x_shift = AddSubShiftedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_SHIFTED)
+ #undef ADD_SUB_SHIFTED
+};
+
+enum AddSubExtendedOp : uint32_t {
+ AddSubExtendedFixed = 0x0B200000,
+ AddSubExtendedFMask = 0x1F200000,
+ AddSubExtendedMask = 0xFFE00000,
+ #define ADD_SUB_EXTENDED(A) \
+ A##_w_ext = AddSubExtendedFixed | A, \
+ A##_x_ext = AddSubExtendedFixed | A | SixtyFourBits
+ ADD_SUB_OP_LIST(ADD_SUB_EXTENDED)
+ #undef ADD_SUB_EXTENDED
+};
+
+// Add/sub with carry.
+enum AddSubWithCarryOp : uint32_t {
+ AddSubWithCarryFixed = 0x1A000000,
+ AddSubWithCarryFMask = 0x1FE00000,
+ AddSubWithCarryMask = 0xFFE0FC00,
+ ADC_w = AddSubWithCarryFixed | ADD,
+ ADC_x = AddSubWithCarryFixed | ADD | SixtyFourBits,
+ ADC = ADC_w,
+ ADCS_w = AddSubWithCarryFixed | ADDS,
+ ADCS_x = AddSubWithCarryFixed | ADDS | SixtyFourBits,
+ SBC_w = AddSubWithCarryFixed | SUB,
+ SBC_x = AddSubWithCarryFixed | SUB | SixtyFourBits,
+ SBC = SBC_w,
+ SBCS_w = AddSubWithCarryFixed | SUBS,
+ SBCS_x = AddSubWithCarryFixed | SUBS | SixtyFourBits
+};
+
+// Rotate right into flags.
+enum RotateRightIntoFlagsOp : uint32_t {
+ RotateRightIntoFlagsFixed = 0x1A000400,
+ RotateRightIntoFlagsFMask = 0x1FE07C00,
+ RotateRightIntoFlagsMask = 0xFFE07C10,
+ RMIF = RotateRightIntoFlagsFixed | 0xA0000000
+};
+
+// Evaluate into flags.
+enum EvaluateIntoFlagsOp : uint32_t {
+ EvaluateIntoFlagsFixed = 0x1A000800,
+ EvaluateIntoFlagsFMask = 0x1FE03C00,
+ EvaluateIntoFlagsMask = 0xFFE07C1F,
+ SETF8 = EvaluateIntoFlagsFixed | 0x2000000D,
+ SETF16 = EvaluateIntoFlagsFixed | 0x2000400D
+};
+
+// Logical (immediate and shifted register).
+enum LogicalOp : uint32_t {
+ LogicalOpMask = 0x60200000,
+ NOT = 0x00200000,
+ AND = 0x00000000,
+ BIC = AND | NOT,
+ ORR = 0x20000000,
+ ORN = ORR | NOT,
+ EOR = 0x40000000,
+ EON = EOR | NOT,
+ ANDS = 0x60000000,
+ BICS = ANDS | NOT
+};
+
+// Logical immediate.
+enum LogicalImmediateOp : uint32_t {
+ LogicalImmediateFixed = 0x12000000,
+ LogicalImmediateFMask = 0x1F800000,
+ LogicalImmediateMask = 0xFF800000,
+ AND_w_imm = LogicalImmediateFixed | AND,
+ AND_x_imm = LogicalImmediateFixed | AND | SixtyFourBits,
+ ORR_w_imm = LogicalImmediateFixed | ORR,
+ ORR_x_imm = LogicalImmediateFixed | ORR | SixtyFourBits,
+ EOR_w_imm = LogicalImmediateFixed | EOR,
+ EOR_x_imm = LogicalImmediateFixed | EOR | SixtyFourBits,
+ ANDS_w_imm = LogicalImmediateFixed | ANDS,
+ ANDS_x_imm = LogicalImmediateFixed | ANDS | SixtyFourBits
+};
+
+// Logical shifted register.
+enum LogicalShiftedOp : uint32_t {
+ LogicalShiftedFixed = 0x0A000000,
+ LogicalShiftedFMask = 0x1F000000,
+ LogicalShiftedMask = 0xFF200000,
+ AND_w = LogicalShiftedFixed | AND,
+ AND_x = LogicalShiftedFixed | AND | SixtyFourBits,
+ AND_shift = AND_w,
+ BIC_w = LogicalShiftedFixed | BIC,
+ BIC_x = LogicalShiftedFixed | BIC | SixtyFourBits,
+ BIC_shift = BIC_w,
+ ORR_w = LogicalShiftedFixed | ORR,
+ ORR_x = LogicalShiftedFixed | ORR | SixtyFourBits,
+ ORR_shift = ORR_w,
+ ORN_w = LogicalShiftedFixed | ORN,
+ ORN_x = LogicalShiftedFixed | ORN | SixtyFourBits,
+ ORN_shift = ORN_w,
+ EOR_w = LogicalShiftedFixed | EOR,
+ EOR_x = LogicalShiftedFixed | EOR | SixtyFourBits,
+ EOR_shift = EOR_w,
+ EON_w = LogicalShiftedFixed | EON,
+ EON_x = LogicalShiftedFixed | EON | SixtyFourBits,
+ EON_shift = EON_w,
+ ANDS_w = LogicalShiftedFixed | ANDS,
+ ANDS_x = LogicalShiftedFixed | ANDS | SixtyFourBits,
+ ANDS_shift = ANDS_w,
+ BICS_w = LogicalShiftedFixed | BICS,
+ BICS_x = LogicalShiftedFixed | BICS | SixtyFourBits,
+ BICS_shift = BICS_w
+};
+
+// Move wide immediate.
+enum MoveWideImmediateOp : uint32_t {
+ MoveWideImmediateFixed = 0x12800000,
+ MoveWideImmediateFMask = 0x1F800000,
+ MoveWideImmediateMask = 0xFF800000,
+ MOVN = 0x00000000,
+ MOVZ = 0x40000000,
+ MOVK = 0x60000000,
+ MOVN_w = MoveWideImmediateFixed | MOVN,
+ MOVN_x = MoveWideImmediateFixed | MOVN | SixtyFourBits,
+ MOVZ_w = MoveWideImmediateFixed | MOVZ,
+ MOVZ_x = MoveWideImmediateFixed | MOVZ | SixtyFourBits,
+ MOVK_w = MoveWideImmediateFixed | MOVK,
+ MOVK_x = MoveWideImmediateFixed | MOVK | SixtyFourBits
+};
+
+// Bitfield.
+const int kBitfieldNOffset = 22;
+enum BitfieldOp : uint32_t {
+ BitfieldFixed = 0x13000000,
+ BitfieldFMask = 0x1F800000,
+ BitfieldMask = 0xFF800000,
+ SBFM_w = BitfieldFixed | 0x00000000,
+ SBFM_x = BitfieldFixed | 0x80000000,
+ SBFM = SBFM_w,
+ BFM_w = BitfieldFixed | 0x20000000,
+ BFM_x = BitfieldFixed | 0xA0000000,
+ BFM = BFM_w,
+ UBFM_w = BitfieldFixed | 0x40000000,
+ UBFM_x = BitfieldFixed | 0xC0000000,
+ UBFM = UBFM_w
+  // The bitfield N field is at bit 22; see kBitfieldNOffset above.
+};
+
+// Extract.
+enum ExtractOp : uint32_t {
+ ExtractFixed = 0x13800000,
+ ExtractFMask = 0x1F800000,
+ ExtractMask = 0xFFA00000,
+ EXTR_w = ExtractFixed | 0x00000000,
+ EXTR_x = ExtractFixed | 0x80000000,
+ EXTR = EXTR_w
+};
+
+// Unconditional branch.
+enum UnconditionalBranchOp : uint32_t {
+ UnconditionalBranchFixed = 0x14000000,
+ UnconditionalBranchFMask = 0x7C000000,
+ UnconditionalBranchMask = 0xFC000000,
+ B = UnconditionalBranchFixed | 0x00000000,
+ BL = UnconditionalBranchFixed | 0x80000000
+};
+
+// Unconditional branch to register.
+enum UnconditionalBranchToRegisterOp : uint32_t {
+ UnconditionalBranchToRegisterFixed = 0xD6000000,
+ UnconditionalBranchToRegisterFMask = 0xFE000000,
+ UnconditionalBranchToRegisterMask = 0xFFFFFC00,
+ BR = UnconditionalBranchToRegisterFixed | 0x001F0000,
+ BLR = UnconditionalBranchToRegisterFixed | 0x003F0000,
+ RET = UnconditionalBranchToRegisterFixed | 0x005F0000,
+
+ BRAAZ = UnconditionalBranchToRegisterFixed | 0x001F0800,
+ BRABZ = UnconditionalBranchToRegisterFixed | 0x001F0C00,
+ BLRAAZ = UnconditionalBranchToRegisterFixed | 0x003F0800,
+ BLRABZ = UnconditionalBranchToRegisterFixed | 0x003F0C00,
+ RETAA = UnconditionalBranchToRegisterFixed | 0x005F0800,
+ RETAB = UnconditionalBranchToRegisterFixed | 0x005F0C00,
+ BRAA = UnconditionalBranchToRegisterFixed | 0x011F0800,
+ BRAB = UnconditionalBranchToRegisterFixed | 0x011F0C00,
+ BLRAA = UnconditionalBranchToRegisterFixed | 0x013F0800,
+ BLRAB = UnconditionalBranchToRegisterFixed | 0x013F0C00
+};
+
+// Compare and branch.
+enum CompareBranchOp : uint32_t {
+ CompareBranchFixed = 0x34000000,
+ CompareBranchFMask = 0x7E000000,
+ CompareBranchMask = 0xFF000000,
+ CBZ_w = CompareBranchFixed | 0x00000000,
+ CBZ_x = CompareBranchFixed | 0x80000000,
+ CBZ = CBZ_w,
+ CBNZ_w = CompareBranchFixed | 0x01000000,
+ CBNZ_x = CompareBranchFixed | 0x81000000,
+ CBNZ = CBNZ_w
+};
+
+// Test and branch.
+enum TestBranchOp : uint32_t {
+ TestBranchFixed = 0x36000000,
+ TestBranchFMask = 0x7E000000,
+ TestBranchMask = 0x7F000000,
+ TBZ = TestBranchFixed | 0x00000000,
+ TBNZ = TestBranchFixed | 0x01000000
+};
+
+// Conditional branch.
+enum ConditionalBranchOp : uint32_t {
+ ConditionalBranchFixed = 0x54000000,
+ ConditionalBranchFMask = 0xFE000000,
+ ConditionalBranchMask = 0xFF000010,
+ B_cond = ConditionalBranchFixed | 0x00000000
+};
+
+// System.
+// System instruction encoding is complicated because some instructions use op
+// and CR fields to encode parameters. To handle this cleanly, the system
+// instructions are split into more than one enum.
+
+enum SystemOp : uint32_t {
+ SystemFixed = 0xD5000000,
+ SystemFMask = 0xFFC00000
+};
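+
+// An illustrative decode order for this split (a sketch, not the actual
+// decoder): an instruction word is first matched against SystemFixed using
+// SystemFMask, and the narrower Fixed/FMask pairs below (SystemSysReg,
+// SystemPState, SystemHint, SystemSys, ...) are then tested to select the
+// sub-class, following the Fixed/FMask/Mask pattern described earlier.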
+
+enum SystemSysRegOp : uint32_t {
+ SystemSysRegFixed = 0xD5100000,
+ SystemSysRegFMask = 0xFFD00000,
+ SystemSysRegMask = 0xFFF00000,
+ MRS = SystemSysRegFixed | 0x00200000,
+ MSR = SystemSysRegFixed | 0x00000000
+};
+
+enum SystemPStateOp : uint32_t {
+ SystemPStateFixed = 0xD5004000,
+ SystemPStateFMask = 0xFFF8F000,
+ SystemPStateMask = 0xFFFFF0FF,
+ CFINV = SystemPStateFixed | 0x0000001F,
+ XAFLAG = SystemPStateFixed | 0x0000003F,
+ AXFLAG = SystemPStateFixed | 0x0000005F
+};
+
+enum SystemHintOp : uint32_t {
+ SystemHintFixed = 0xD503201F,
+ SystemHintFMask = 0xFFFFF01F,
+ SystemHintMask = 0xFFFFF01F,
+ HINT = SystemHintFixed | 0x00000000
+};
+
+enum SystemSysOp : uint32_t {
+ SystemSysFixed = 0xD5080000,
+ SystemSysFMask = 0xFFF80000,
+ SystemSysMask = 0xFFF80000,
+ SYS = SystemSysFixed | 0x00000000
+};
+
+// Exception.
+enum ExceptionOp : uint32_t {
+ ExceptionFixed = 0xD4000000,
+ ExceptionFMask = 0xFF000000,
+ ExceptionMask = 0xFFE0001F,
+ HLT = ExceptionFixed | 0x00400000,
+ BRK = ExceptionFixed | 0x00200000,
+ SVC = ExceptionFixed | 0x00000001,
+ HVC = ExceptionFixed | 0x00000002,
+ SMC = ExceptionFixed | 0x00000003,
+ DCPS1 = ExceptionFixed | 0x00A00001,
+ DCPS2 = ExceptionFixed | 0x00A00002,
+ DCPS3 = ExceptionFixed | 0x00A00003
+};
+
+enum MemBarrierOp : uint32_t {
+ MemBarrierFixed = 0xD503309F,
+ MemBarrierFMask = 0xFFFFF09F,
+ MemBarrierMask = 0xFFFFF0FF,
+ DSB = MemBarrierFixed | 0x00000000,
+ DMB = MemBarrierFixed | 0x00000020,
+ ISB = MemBarrierFixed | 0x00000040
+};
+
+enum SystemExclusiveMonitorOp : uint32_t {
+ SystemExclusiveMonitorFixed = 0xD503305F,
+ SystemExclusiveMonitorFMask = 0xFFFFF0FF,
+ SystemExclusiveMonitorMask = 0xFFFFF0FF,
+ CLREX = SystemExclusiveMonitorFixed
+};
+
+enum SystemPAuthOp : uint32_t {
+ SystemPAuthFixed = 0xD503211F,
+ SystemPAuthFMask = 0xFFFFFD1F,
+ SystemPAuthMask = 0xFFFFFFFF,
+ PACIA1716 = SystemPAuthFixed | 0x00000100,
+ PACIB1716 = SystemPAuthFixed | 0x00000140,
+ AUTIA1716 = SystemPAuthFixed | 0x00000180,
+ AUTIB1716 = SystemPAuthFixed | 0x000001C0,
+ PACIAZ = SystemPAuthFixed | 0x00000300,
+ PACIASP = SystemPAuthFixed | 0x00000320,
+ PACIBZ = SystemPAuthFixed | 0x00000340,
+ PACIBSP = SystemPAuthFixed | 0x00000360,
+ AUTIAZ = SystemPAuthFixed | 0x00000380,
+ AUTIASP = SystemPAuthFixed | 0x000003A0,
+ AUTIBZ = SystemPAuthFixed | 0x000003C0,
+ AUTIBSP = SystemPAuthFixed | 0x000003E0,
+
+ // XPACLRI has the same fixed mask as System Hints and needs to be handled
+ // differently.
+ XPACLRI = 0xD50320FF
+};
+
+// Any load or store.
+enum LoadStoreAnyOp : uint32_t {
+ LoadStoreAnyFMask = 0x0a000000,
+ LoadStoreAnyFixed = 0x08000000
+};
+
+// Any load pair or store pair.
+enum LoadStorePairAnyOp : uint32_t {
+ LoadStorePairAnyFMask = 0x3a000000,
+ LoadStorePairAnyFixed = 0x28000000
+};
+
+#define LOAD_STORE_PAIR_OP_LIST(V) \
+ V(STP, w, 0x00000000), \
+ V(LDP, w, 0x00400000), \
+ V(LDPSW, x, 0x40400000), \
+ V(STP, x, 0x80000000), \
+ V(LDP, x, 0x80400000), \
+ V(STP, s, 0x04000000), \
+ V(LDP, s, 0x04400000), \
+ V(STP, d, 0x44000000), \
+ V(LDP, d, 0x44400000), \
+ V(STP, q, 0x84000000), \
+ V(LDP, q, 0x84400000)
+
+// Load/store pair (post, pre and offset).
+enum LoadStorePairOp : uint32_t {
+ LoadStorePairMask = 0xC4400000,
+ LoadStorePairLBit = 1 << 22,
+ #define LOAD_STORE_PAIR(A, B, C) \
+ A##_##B = C
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR)
+ #undef LOAD_STORE_PAIR
+};
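+
+// Note that LoadStorePairLBit (1 << 22 == 0x00400000) is exactly the
+// difference between each store/load pair above, e.g.
+// LDP_w == STP_w | LoadStorePairLBit and LDP_x == STP_x | LoadStorePairLBit,
+// so testing that bit distinguishes loads from stores in this class.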
+
+enum LoadStorePairPostIndexOp : uint32_t {
+ LoadStorePairPostIndexFixed = 0x28800000,
+ LoadStorePairPostIndexFMask = 0x3B800000,
+ LoadStorePairPostIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_POST_INDEX(A, B, C) \
+ A##_##B##_post = LoadStorePairPostIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_POST_INDEX)
+ #undef LOAD_STORE_PAIR_POST_INDEX
+};
+
+enum LoadStorePairPreIndexOp : uint32_t {
+ LoadStorePairPreIndexFixed = 0x29800000,
+ LoadStorePairPreIndexFMask = 0x3B800000,
+ LoadStorePairPreIndexMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_PRE_INDEX(A, B, C) \
+ A##_##B##_pre = LoadStorePairPreIndexFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_PRE_INDEX)
+ #undef LOAD_STORE_PAIR_PRE_INDEX
+};
+
+enum LoadStorePairOffsetOp : uint32_t {
+ LoadStorePairOffsetFixed = 0x29000000,
+ LoadStorePairOffsetFMask = 0x3B800000,
+ LoadStorePairOffsetMask = 0xFFC00000,
+ #define LOAD_STORE_PAIR_OFFSET(A, B, C) \
+ A##_##B##_off = LoadStorePairOffsetFixed | A##_##B
+ LOAD_STORE_PAIR_OP_LIST(LOAD_STORE_PAIR_OFFSET)
+ #undef LOAD_STORE_PAIR_OFFSET
+};
+
+enum LoadStorePairNonTemporalOp : uint32_t {
+ LoadStorePairNonTemporalFixed = 0x28000000,
+ LoadStorePairNonTemporalFMask = 0x3B800000,
+ LoadStorePairNonTemporalMask = 0xFFC00000,
+ LoadStorePairNonTemporalLBit = 1 << 22,
+ STNP_w = LoadStorePairNonTemporalFixed | STP_w,
+ LDNP_w = LoadStorePairNonTemporalFixed | LDP_w,
+ STNP_x = LoadStorePairNonTemporalFixed | STP_x,
+ LDNP_x = LoadStorePairNonTemporalFixed | LDP_x,
+ STNP_s = LoadStorePairNonTemporalFixed | STP_s,
+ LDNP_s = LoadStorePairNonTemporalFixed | LDP_s,
+ STNP_d = LoadStorePairNonTemporalFixed | STP_d,
+ LDNP_d = LoadStorePairNonTemporalFixed | LDP_d,
+ STNP_q = LoadStorePairNonTemporalFixed | STP_q,
+ LDNP_q = LoadStorePairNonTemporalFixed | LDP_q
+};
+
+// Load with pointer authentication.
+enum LoadStorePACOp : uint32_t {
+ LoadStorePACFixed = 0xF8200400,
+ LoadStorePACFMask = 0xFF200400,
+ LoadStorePACMask = 0xFFA00C00,
+ LoadStorePACPreBit = 0x00000800,
+ LDRAA = LoadStorePACFixed | 0x00000000,
+ LDRAA_pre = LoadStorePACPreBit | LDRAA,
+ LDRAB = LoadStorePACFixed | 0x00800000,
+ LDRAB_pre = LoadStorePACPreBit | LDRAB
+};
+
+// Load literal.
+enum LoadLiteralOp : uint32_t {
+ LoadLiteralFixed = 0x18000000,
+ LoadLiteralFMask = 0x3B000000,
+ LoadLiteralMask = 0xFF000000,
+ LDR_w_lit = LoadLiteralFixed | 0x00000000,
+ LDR_x_lit = LoadLiteralFixed | 0x40000000,
+ LDRSW_x_lit = LoadLiteralFixed | 0x80000000,
+ PRFM_lit = LoadLiteralFixed | 0xC0000000,
+ LDR_s_lit = LoadLiteralFixed | 0x04000000,
+ LDR_d_lit = LoadLiteralFixed | 0x44000000,
+ LDR_q_lit = LoadLiteralFixed | 0x84000000
+};
+
+#define LOAD_STORE_OP_LIST(V) \
+ V(ST, RB, w, 0x00000000), \
+ V(ST, RH, w, 0x40000000), \
+ V(ST, R, w, 0x80000000), \
+ V(ST, R, x, 0xC0000000), \
+ V(LD, RB, w, 0x00400000), \
+ V(LD, RH, w, 0x40400000), \
+ V(LD, R, w, 0x80400000), \
+ V(LD, R, x, 0xC0400000), \
+ V(LD, RSB, x, 0x00800000), \
+ V(LD, RSH, x, 0x40800000), \
+ V(LD, RSW, x, 0x80800000), \
+ V(LD, RSB, w, 0x00C00000), \
+ V(LD, RSH, w, 0x40C00000), \
+ V(ST, R, b, 0x04000000), \
+ V(ST, R, h, 0x44000000), \
+ V(ST, R, s, 0x84000000), \
+ V(ST, R, d, 0xC4000000), \
+ V(ST, R, q, 0x04800000), \
+ V(LD, R, b, 0x04400000), \
+ V(LD, R, h, 0x44400000), \
+ V(LD, R, s, 0x84400000), \
+ V(LD, R, d, 0xC4400000), \
+ V(LD, R, q, 0x04C00000)
+
+// Load/store (post, pre, offset and unsigned).
+enum LoadStoreOp : uint32_t {
+ LoadStoreMask = 0xC4C00000,
+ LoadStoreVMask = 0x04000000,
+ #define LOAD_STORE(A, B, C, D) \
+ A##B##_##C = D
+ LOAD_STORE_OP_LIST(LOAD_STORE),
+ #undef LOAD_STORE
+ PRFM = 0xC0800000
+};
+
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp : uint32_t {
+ LoadStoreUnscaledOffsetFixed = 0x38000000,
+ LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+ LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+ PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSCALED(A, B, C, D) \
+ A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+ #undef LOAD_STORE_UNSCALED
+};
+
+// Load/store post index.
+enum LoadStorePostIndex : uint32_t {
+ LoadStorePostIndexFixed = 0x38000400,
+ LoadStorePostIndexFMask = 0x3B200C00,
+ LoadStorePostIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_POST_INDEX(A, B, C, D) \
+ A##B##_##C##_post = LoadStorePostIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_POST_INDEX)
+ #undef LOAD_STORE_POST_INDEX
+};
+
+// Load/store pre index.
+enum LoadStorePreIndex : uint32_t {
+ LoadStorePreIndexFixed = 0x38000C00,
+ LoadStorePreIndexFMask = 0x3B200C00,
+ LoadStorePreIndexMask = 0xFFE00C00,
+ #define LOAD_STORE_PRE_INDEX(A, B, C, D) \
+ A##B##_##C##_pre = LoadStorePreIndexFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_PRE_INDEX)
+ #undef LOAD_STORE_PRE_INDEX
+};
+
+// Load/store unsigned offset.
+enum LoadStoreUnsignedOffset : uint32_t {
+ LoadStoreUnsignedOffsetFixed = 0x39000000,
+ LoadStoreUnsignedOffsetFMask = 0x3B000000,
+ LoadStoreUnsignedOffsetMask = 0xFFC00000,
+ PRFM_unsigned = LoadStoreUnsignedOffsetFixed | PRFM,
+ #define LOAD_STORE_UNSIGNED_OFFSET(A, B, C, D) \
+ A##B##_##C##_unsigned = LoadStoreUnsignedOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_UNSIGNED_OFFSET)
+ #undef LOAD_STORE_UNSIGNED_OFFSET
+};
+
+// Load/store register offset.
+enum LoadStoreRegisterOffset : uint32_t {
+ LoadStoreRegisterOffsetFixed = 0x38200800,
+ LoadStoreRegisterOffsetFMask = 0x3B200C00,
+ LoadStoreRegisterOffsetMask = 0xFFE00C00,
+ PRFM_reg = LoadStoreRegisterOffsetFixed | PRFM,
+ #define LOAD_STORE_REGISTER_OFFSET(A, B, C, D) \
+ A##B##_##C##_reg = LoadStoreRegisterOffsetFixed | D
+ LOAD_STORE_OP_LIST(LOAD_STORE_REGISTER_OFFSET)
+ #undef LOAD_STORE_REGISTER_OFFSET
+};
+
+enum LoadStoreExclusive : uint32_t {
+ LoadStoreExclusiveFixed = 0x08000000,
+ LoadStoreExclusiveFMask = 0x3F000000,
+ LoadStoreExclusiveMask = 0xFFE08000,
+ STXRB_w = LoadStoreExclusiveFixed | 0x00000000,
+ STXRH_w = LoadStoreExclusiveFixed | 0x40000000,
+ STXR_w = LoadStoreExclusiveFixed | 0x80000000,
+ STXR_x = LoadStoreExclusiveFixed | 0xC0000000,
+ LDXRB_w = LoadStoreExclusiveFixed | 0x00400000,
+ LDXRH_w = LoadStoreExclusiveFixed | 0x40400000,
+ LDXR_w = LoadStoreExclusiveFixed | 0x80400000,
+ LDXR_x = LoadStoreExclusiveFixed | 0xC0400000,
+ STXP_w = LoadStoreExclusiveFixed | 0x80200000,
+ STXP_x = LoadStoreExclusiveFixed | 0xC0200000,
+ LDXP_w = LoadStoreExclusiveFixed | 0x80600000,
+ LDXP_x = LoadStoreExclusiveFixed | 0xC0600000,
+ STLXRB_w = LoadStoreExclusiveFixed | 0x00008000,
+ STLXRH_w = LoadStoreExclusiveFixed | 0x40008000,
+ STLXR_w = LoadStoreExclusiveFixed | 0x80008000,
+ STLXR_x = LoadStoreExclusiveFixed | 0xC0008000,
+ LDAXRB_w = LoadStoreExclusiveFixed | 0x00408000,
+ LDAXRH_w = LoadStoreExclusiveFixed | 0x40408000,
+ LDAXR_w = LoadStoreExclusiveFixed | 0x80408000,
+ LDAXR_x = LoadStoreExclusiveFixed | 0xC0408000,
+ STLXP_w = LoadStoreExclusiveFixed | 0x80208000,
+ STLXP_x = LoadStoreExclusiveFixed | 0xC0208000,
+ LDAXP_w = LoadStoreExclusiveFixed | 0x80608000,
+ LDAXP_x = LoadStoreExclusiveFixed | 0xC0608000,
+ STLRB_w = LoadStoreExclusiveFixed | 0x00808000,
+ STLRH_w = LoadStoreExclusiveFixed | 0x40808000,
+ STLR_w = LoadStoreExclusiveFixed | 0x80808000,
+ STLR_x = LoadStoreExclusiveFixed | 0xC0808000,
+ LDARB_w = LoadStoreExclusiveFixed | 0x00C08000,
+ LDARH_w = LoadStoreExclusiveFixed | 0x40C08000,
+ LDAR_w = LoadStoreExclusiveFixed | 0x80C08000,
+ LDAR_x = LoadStoreExclusiveFixed | 0xC0C08000,
+
+  // v8.1 LORegion (Limited Ordering Regions) load/store ops
+ STLLRB = LoadStoreExclusiveFixed | 0x00800000,
+ LDLARB = LoadStoreExclusiveFixed | 0x00C00000,
+ STLLRH = LoadStoreExclusiveFixed | 0x40800000,
+ LDLARH = LoadStoreExclusiveFixed | 0x40C00000,
+ STLLR_w = LoadStoreExclusiveFixed | 0x80800000,
+ LDLAR_w = LoadStoreExclusiveFixed | 0x80C00000,
+ STLLR_x = LoadStoreExclusiveFixed | 0xC0800000,
+ LDLAR_x = LoadStoreExclusiveFixed | 0xC0C00000,
+
+  // v8.1 compare-and-swap (CAS) ops, encoded in the load/store exclusive space
+ LSEBit_l = 0x00400000,
+ LSEBit_o0 = 0x00008000,
+ LSEBit_sz = 0x40000000,
+ CASFixed = LoadStoreExclusiveFixed | 0x80A00000,
+ CASBFixed = LoadStoreExclusiveFixed | 0x00A00000,
+ CASHFixed = LoadStoreExclusiveFixed | 0x40A00000,
+ CASPFixed = LoadStoreExclusiveFixed | 0x00200000,
+ CAS_w = CASFixed,
+ CAS_x = CASFixed | LSEBit_sz,
+ CASA_w = CASFixed | LSEBit_l,
+ CASA_x = CASFixed | LSEBit_l | LSEBit_sz,
+ CASL_w = CASFixed | LSEBit_o0,
+ CASL_x = CASFixed | LSEBit_o0 | LSEBit_sz,
+ CASAL_w = CASFixed | LSEBit_l | LSEBit_o0,
+ CASAL_x = CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz,
+ CASB = CASBFixed,
+ CASAB = CASBFixed | LSEBit_l,
+ CASLB = CASBFixed | LSEBit_o0,
+ CASALB = CASBFixed | LSEBit_l | LSEBit_o0,
+ CASH = CASHFixed,
+ CASAH = CASHFixed | LSEBit_l,
+ CASLH = CASHFixed | LSEBit_o0,
+ CASALH = CASHFixed | LSEBit_l | LSEBit_o0,
+ CASP_w = CASPFixed,
+ CASP_x = CASPFixed | LSEBit_sz,
+ CASPA_w = CASPFixed | LSEBit_l,
+ CASPA_x = CASPFixed | LSEBit_l | LSEBit_sz,
+ CASPL_w = CASPFixed | LSEBit_o0,
+ CASPL_x = CASPFixed | LSEBit_o0 | LSEBit_sz,
+ CASPAL_w = CASPFixed | LSEBit_l | LSEBit_o0,
+ CASPAL_x = CASPFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz
+};
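+
+// In the CAS encodings above, LSEBit_l (bit 22) selects the acquire variants
+// (CASA*, CASPA*), LSEBit_o0 (bit 15) selects the release variants (CASL*,
+// CASPL*), and LSEBit_sz (bit 30) selects the 64-bit forms. For example,
+// CASAL_x == CASFixed | LSEBit_l | LSEBit_o0 | LSEBit_sz == 0xC8E08000.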
+
+// Load/store RCpc unscaled offset.
+enum LoadStoreRCpcUnscaledOffsetOp : uint32_t {
+ LoadStoreRCpcUnscaledOffsetFixed = 0x19000000,
+ LoadStoreRCpcUnscaledOffsetFMask = 0x3F200C00,
+ LoadStoreRCpcUnscaledOffsetMask = 0xFFE00C00,
+ STLURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00000000,
+ LDAPURB = LoadStoreRCpcUnscaledOffsetFixed | 0x00400000,
+ LDAPURSB_x = LoadStoreRCpcUnscaledOffsetFixed | 0x00800000,
+ LDAPURSB_w = LoadStoreRCpcUnscaledOffsetFixed | 0x00C00000,
+ STLURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40000000,
+ LDAPURH = LoadStoreRCpcUnscaledOffsetFixed | 0x40400000,
+ LDAPURSH_x = LoadStoreRCpcUnscaledOffsetFixed | 0x40800000,
+ LDAPURSH_w = LoadStoreRCpcUnscaledOffsetFixed | 0x40C00000,
+ STLUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80000000,
+ LDAPUR_w = LoadStoreRCpcUnscaledOffsetFixed | 0x80400000,
+ LDAPURSW = LoadStoreRCpcUnscaledOffsetFixed | 0x80800000,
+ STLUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0000000,
+ LDAPUR_x = LoadStoreRCpcUnscaledOffsetFixed | 0xC0400000
+};
+
+#define ATOMIC_MEMORY_SIMPLE_OPC_LIST(V) \
+ V(LDADD, 0x00000000), \
+ V(LDCLR, 0x00001000), \
+ V(LDEOR, 0x00002000), \
+ V(LDSET, 0x00003000), \
+ V(LDSMAX, 0x00004000), \
+ V(LDSMIN, 0x00005000), \
+ V(LDUMAX, 0x00006000), \
+ V(LDUMIN, 0x00007000)
+
+// Atomic memory.
+enum AtomicMemoryOp : uint32_t {
+ AtomicMemoryFixed = 0x38200000,
+ AtomicMemoryFMask = 0x3B200C00,
+ AtomicMemoryMask = 0xFFE0FC00,
+ SWPB = AtomicMemoryFixed | 0x00008000,
+ SWPAB = AtomicMemoryFixed | 0x00808000,
+ SWPLB = AtomicMemoryFixed | 0x00408000,
+ SWPALB = AtomicMemoryFixed | 0x00C08000,
+ SWPH = AtomicMemoryFixed | 0x40008000,
+ SWPAH = AtomicMemoryFixed | 0x40808000,
+ SWPLH = AtomicMemoryFixed | 0x40408000,
+ SWPALH = AtomicMemoryFixed | 0x40C08000,
+ SWP_w = AtomicMemoryFixed | 0x80008000,
+ SWPA_w = AtomicMemoryFixed | 0x80808000,
+ SWPL_w = AtomicMemoryFixed | 0x80408000,
+ SWPAL_w = AtomicMemoryFixed | 0x80C08000,
+ SWP_x = AtomicMemoryFixed | 0xC0008000,
+ SWPA_x = AtomicMemoryFixed | 0xC0808000,
+ SWPL_x = AtomicMemoryFixed | 0xC0408000,
+ SWPAL_x = AtomicMemoryFixed | 0xC0C08000,
+ LDAPRB = AtomicMemoryFixed | 0x0080C000,
+ LDAPRH = AtomicMemoryFixed | 0x4080C000,
+ LDAPR_w = AtomicMemoryFixed | 0x8080C000,
+ LDAPR_x = AtomicMemoryFixed | 0xC080C000,
+
+ AtomicMemorySimpleFMask = 0x3B208C00,
+ AtomicMemorySimpleOpMask = 0x00007000,
+#define ATOMIC_MEMORY_SIMPLE(N, OP) \
+ N##Op = OP, \
+ N##B = AtomicMemoryFixed | OP, \
+ N##AB = AtomicMemoryFixed | OP | 0x00800000, \
+ N##LB = AtomicMemoryFixed | OP | 0x00400000, \
+ N##ALB = AtomicMemoryFixed | OP | 0x00C00000, \
+ N##H = AtomicMemoryFixed | OP | 0x40000000, \
+ N##AH = AtomicMemoryFixed | OP | 0x40800000, \
+ N##LH = AtomicMemoryFixed | OP | 0x40400000, \
+ N##ALH = AtomicMemoryFixed | OP | 0x40C00000, \
+ N##_w = AtomicMemoryFixed | OP | 0x80000000, \
+ N##A_w = AtomicMemoryFixed | OP | 0x80800000, \
+ N##L_w = AtomicMemoryFixed | OP | 0x80400000, \
+ N##AL_w = AtomicMemoryFixed | OP | 0x80C00000, \
+ N##_x = AtomicMemoryFixed | OP | 0xC0000000, \
+ N##A_x = AtomicMemoryFixed | OP | 0xC0800000, \
+ N##L_x = AtomicMemoryFixed | OP | 0xC0400000, \
+ N##AL_x = AtomicMemoryFixed | OP | 0xC0C00000
+
+ ATOMIC_MEMORY_SIMPLE_OPC_LIST(ATOMIC_MEMORY_SIMPLE)
+#undef ATOMIC_MEMORY_SIMPLE
+};
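+
+// A sketch of one ATOMIC_MEMORY_SIMPLE expansion, derived from the constants
+// above (LDADD, opc == 0x00000000):
+//   LDADDOp   = 0x00000000
+//   LDADDB    = AtomicMemoryFixed | 0x00000000              = 0x38200000
+//   LDADDAL_x = AtomicMemoryFixed | 0x00000000 | 0xC0C00000 = 0xF8E00000
+// Once an instruction has matched this class, (instr & AtomicMemorySimpleOpMask)
+// recovers the opc value (LDADDOp, LDCLROp, and so on).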
+
+// Conditional compare.
+enum ConditionalCompareOp : uint32_t {
+ ConditionalCompareMask = 0x60000000,
+ CCMN = 0x20000000,
+ CCMP = 0x60000000
+};
+
+// Conditional compare register.
+enum ConditionalCompareRegisterOp : uint32_t {
+ ConditionalCompareRegisterFixed = 0x1A400000,
+ ConditionalCompareRegisterFMask = 0x1FE00800,
+ ConditionalCompareRegisterMask = 0xFFE00C10,
+ CCMN_w = ConditionalCompareRegisterFixed | CCMN,
+ CCMN_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMN,
+ CCMP_w = ConditionalCompareRegisterFixed | CCMP,
+ CCMP_x = ConditionalCompareRegisterFixed | SixtyFourBits | CCMP
+};
+
+// Conditional compare immediate.
+enum ConditionalCompareImmediateOp : uint32_t {
+ ConditionalCompareImmediateFixed = 0x1A400800,
+ ConditionalCompareImmediateFMask = 0x1FE00800,
+ ConditionalCompareImmediateMask = 0xFFE00C10,
+ CCMN_w_imm = ConditionalCompareImmediateFixed | CCMN,
+ CCMN_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMN,
+ CCMP_w_imm = ConditionalCompareImmediateFixed | CCMP,
+ CCMP_x_imm = ConditionalCompareImmediateFixed | SixtyFourBits | CCMP
+};
+
+// Conditional select.
+enum ConditionalSelectOp : uint32_t {
+ ConditionalSelectFixed = 0x1A800000,
+ ConditionalSelectFMask = 0x1FE00000,
+ ConditionalSelectMask = 0xFFE00C00,
+ CSEL_w = ConditionalSelectFixed | 0x00000000,
+ CSEL_x = ConditionalSelectFixed | 0x80000000,
+ CSEL = CSEL_w,
+ CSINC_w = ConditionalSelectFixed | 0x00000400,
+ CSINC_x = ConditionalSelectFixed | 0x80000400,
+ CSINC = CSINC_w,
+ CSINV_w = ConditionalSelectFixed | 0x40000000,
+ CSINV_x = ConditionalSelectFixed | 0xC0000000,
+ CSINV = CSINV_w,
+ CSNEG_w = ConditionalSelectFixed | 0x40000400,
+ CSNEG_x = ConditionalSelectFixed | 0xC0000400,
+ CSNEG = CSNEG_w
+};
+
+// Data processing 1 source.
+enum DataProcessing1SourceOp : uint32_t {
+ DataProcessing1SourceFixed = 0x5AC00000,
+ DataProcessing1SourceFMask = 0x5FE00000,
+ DataProcessing1SourceMask = 0xFFFFFC00,
+ RBIT = DataProcessing1SourceFixed | 0x00000000,
+ RBIT_w = RBIT,
+ RBIT_x = RBIT | SixtyFourBits,
+ REV16 = DataProcessing1SourceFixed | 0x00000400,
+ REV16_w = REV16,
+ REV16_x = REV16 | SixtyFourBits,
+ REV = DataProcessing1SourceFixed | 0x00000800,
+ REV_w = REV,
+ REV32_x = REV | SixtyFourBits,
+ REV_x = DataProcessing1SourceFixed | SixtyFourBits | 0x00000C00,
+ CLZ = DataProcessing1SourceFixed | 0x00001000,
+ CLZ_w = CLZ,
+ CLZ_x = CLZ | SixtyFourBits,
+ CLS = DataProcessing1SourceFixed | 0x00001400,
+ CLS_w = CLS,
+ CLS_x = CLS | SixtyFourBits,
+
+ // Pointer authentication instructions in Armv8.3.
+ PACIA = DataProcessing1SourceFixed | 0x80010000,
+ PACIB = DataProcessing1SourceFixed | 0x80010400,
+ PACDA = DataProcessing1SourceFixed | 0x80010800,
+ PACDB = DataProcessing1SourceFixed | 0x80010C00,
+ AUTIA = DataProcessing1SourceFixed | 0x80011000,
+ AUTIB = DataProcessing1SourceFixed | 0x80011400,
+ AUTDA = DataProcessing1SourceFixed | 0x80011800,
+ AUTDB = DataProcessing1SourceFixed | 0x80011C00,
+ PACIZA = DataProcessing1SourceFixed | 0x80012000,
+ PACIZB = DataProcessing1SourceFixed | 0x80012400,
+ PACDZA = DataProcessing1SourceFixed | 0x80012800,
+ PACDZB = DataProcessing1SourceFixed | 0x80012C00,
+ AUTIZA = DataProcessing1SourceFixed | 0x80013000,
+ AUTIZB = DataProcessing1SourceFixed | 0x80013400,
+ AUTDZA = DataProcessing1SourceFixed | 0x80013800,
+ AUTDZB = DataProcessing1SourceFixed | 0x80013C00,
+ XPACI = DataProcessing1SourceFixed | 0x80014000,
+ XPACD = DataProcessing1SourceFixed | 0x80014400
+};
+
+// Data processing 2 source.
+enum DataProcessing2SourceOp : uint32_t {
+ DataProcessing2SourceFixed = 0x1AC00000,
+ DataProcessing2SourceFMask = 0x5FE00000,
+ DataProcessing2SourceMask = 0xFFE0FC00,
+ UDIV_w = DataProcessing2SourceFixed | 0x00000800,
+ UDIV_x = DataProcessing2SourceFixed | 0x80000800,
+ UDIV = UDIV_w,
+ SDIV_w = DataProcessing2SourceFixed | 0x00000C00,
+ SDIV_x = DataProcessing2SourceFixed | 0x80000C00,
+ SDIV = SDIV_w,
+ LSLV_w = DataProcessing2SourceFixed | 0x00002000,
+ LSLV_x = DataProcessing2SourceFixed | 0x80002000,
+ LSLV = LSLV_w,
+ LSRV_w = DataProcessing2SourceFixed | 0x00002400,
+ LSRV_x = DataProcessing2SourceFixed | 0x80002400,
+ LSRV = LSRV_w,
+ ASRV_w = DataProcessing2SourceFixed | 0x00002800,
+ ASRV_x = DataProcessing2SourceFixed | 0x80002800,
+ ASRV = ASRV_w,
+ RORV_w = DataProcessing2SourceFixed | 0x00002C00,
+ RORV_x = DataProcessing2SourceFixed | 0x80002C00,
+ RORV = RORV_w,
+ PACGA = DataProcessing2SourceFixed | SixtyFourBits | 0x00003000,
+ CRC32B = DataProcessing2SourceFixed | 0x00004000,
+ CRC32H = DataProcessing2SourceFixed | 0x00004400,
+ CRC32W = DataProcessing2SourceFixed | 0x00004800,
+ CRC32X = DataProcessing2SourceFixed | SixtyFourBits | 0x00004C00,
+ CRC32CB = DataProcessing2SourceFixed | 0x00005000,
+ CRC32CH = DataProcessing2SourceFixed | 0x00005400,
+ CRC32CW = DataProcessing2SourceFixed | 0x00005800,
+ CRC32CX = DataProcessing2SourceFixed | SixtyFourBits | 0x00005C00
+};
+
+// Data processing 3 source.
+enum DataProcessing3SourceOp : uint32_t {
+ DataProcessing3SourceFixed = 0x1B000000,
+ DataProcessing3SourceFMask = 0x1F000000,
+ DataProcessing3SourceMask = 0xFFE08000,
+ MADD_w = DataProcessing3SourceFixed | 0x00000000,
+ MADD_x = DataProcessing3SourceFixed | 0x80000000,
+ MADD = MADD_w,
+ MSUB_w = DataProcessing3SourceFixed | 0x00008000,
+ MSUB_x = DataProcessing3SourceFixed | 0x80008000,
+ MSUB = MSUB_w,
+ SMADDL_x = DataProcessing3SourceFixed | 0x80200000,
+ SMSUBL_x = DataProcessing3SourceFixed | 0x80208000,
+ SMULH_x = DataProcessing3SourceFixed | 0x80400000,
+ UMADDL_x = DataProcessing3SourceFixed | 0x80A00000,
+ UMSUBL_x = DataProcessing3SourceFixed | 0x80A08000,
+ UMULH_x = DataProcessing3SourceFixed | 0x80C00000
+};
+
+// Floating point compare.
+enum FPCompareOp : uint32_t {
+ FPCompareFixed = 0x1E202000,
+ FPCompareFMask = 0x5F203C00,
+ FPCompareMask = 0xFFE0FC1F,
+ FCMP_h = FPCompareFixed | FP16 | 0x00000000,
+ FCMP_s = FPCompareFixed | 0x00000000,
+ FCMP_d = FPCompareFixed | FP64 | 0x00000000,
+ FCMP = FCMP_s,
+ FCMP_h_zero = FPCompareFixed | FP16 | 0x00000008,
+ FCMP_s_zero = FPCompareFixed | 0x00000008,
+ FCMP_d_zero = FPCompareFixed | FP64 | 0x00000008,
+ FCMP_zero = FCMP_s_zero,
+ FCMPE_h = FPCompareFixed | FP16 | 0x00000010,
+ FCMPE_s = FPCompareFixed | 0x00000010,
+ FCMPE_d = FPCompareFixed | FP64 | 0x00000010,
+ FCMPE = FCMPE_s,
+ FCMPE_h_zero = FPCompareFixed | FP16 | 0x00000018,
+ FCMPE_s_zero = FPCompareFixed | 0x00000018,
+ FCMPE_d_zero = FPCompareFixed | FP64 | 0x00000018,
+ FCMPE_zero = FCMPE_s_zero
+};
+
+// Floating point conditional compare.
+enum FPConditionalCompareOp : uint32_t {
+ FPConditionalCompareFixed = 0x1E200400,
+ FPConditionalCompareFMask = 0x5F200C00,
+ FPConditionalCompareMask = 0xFFE00C10,
+ FCCMP_h = FPConditionalCompareFixed | FP16 | 0x00000000,
+ FCCMP_s = FPConditionalCompareFixed | 0x00000000,
+ FCCMP_d = FPConditionalCompareFixed | FP64 | 0x00000000,
+ FCCMP = FCCMP_s,
+ FCCMPE_h = FPConditionalCompareFixed | FP16 | 0x00000010,
+ FCCMPE_s = FPConditionalCompareFixed | 0x00000010,
+ FCCMPE_d = FPConditionalCompareFixed | FP64 | 0x00000010,
+ FCCMPE = FCCMPE_s
+};
+
+// Floating point conditional select.
+enum FPConditionalSelectOp : uint32_t {
+ FPConditionalSelectFixed = 0x1E200C00,
+ FPConditionalSelectFMask = 0x5F200C00,
+ FPConditionalSelectMask = 0xFFE00C00,
+ FCSEL_h = FPConditionalSelectFixed | FP16 | 0x00000000,
+ FCSEL_s = FPConditionalSelectFixed | 0x00000000,
+ FCSEL_d = FPConditionalSelectFixed | FP64 | 0x00000000,
+ FCSEL = FCSEL_s
+};
+
+// Floating point immediate.
+enum FPImmediateOp : uint32_t {
+ FPImmediateFixed = 0x1E201000,
+ FPImmediateFMask = 0x5F201C00,
+ FPImmediateMask = 0xFFE01C00,
+ FMOV_h_imm = FPImmediateFixed | FP16 | 0x00000000,
+ FMOV_s_imm = FPImmediateFixed | 0x00000000,
+ FMOV_d_imm = FPImmediateFixed | FP64 | 0x00000000
+};
+
+// Floating point data processing 1 source.
+enum FPDataProcessing1SourceOp : uint32_t {
+ FPDataProcessing1SourceFixed = 0x1E204000,
+ FPDataProcessing1SourceFMask = 0x5F207C00,
+ FPDataProcessing1SourceMask = 0xFFFFFC00,
+ FMOV_h = FPDataProcessing1SourceFixed | FP16 | 0x00000000,
+ FMOV_s = FPDataProcessing1SourceFixed | 0x00000000,
+ FMOV_d = FPDataProcessing1SourceFixed | FP64 | 0x00000000,
+ FMOV = FMOV_s,
+ FABS_h = FPDataProcessing1SourceFixed | FP16 | 0x00008000,
+ FABS_s = FPDataProcessing1SourceFixed | 0x00008000,
+ FABS_d = FPDataProcessing1SourceFixed | FP64 | 0x00008000,
+ FABS = FABS_s,
+ FNEG_h = FPDataProcessing1SourceFixed | FP16 | 0x00010000,
+ FNEG_s = FPDataProcessing1SourceFixed | 0x00010000,
+ FNEG_d = FPDataProcessing1SourceFixed | FP64 | 0x00010000,
+ FNEG = FNEG_s,
+ FSQRT_h = FPDataProcessing1SourceFixed | FP16 | 0x00018000,
+ FSQRT_s = FPDataProcessing1SourceFixed | 0x00018000,
+ FSQRT_d = FPDataProcessing1SourceFixed | FP64 | 0x00018000,
+ FSQRT = FSQRT_s,
+ FCVT_ds = FPDataProcessing1SourceFixed | 0x00028000,
+ FCVT_sd = FPDataProcessing1SourceFixed | FP64 | 0x00020000,
+ FCVT_hs = FPDataProcessing1SourceFixed | 0x00038000,
+ FCVT_hd = FPDataProcessing1SourceFixed | FP64 | 0x00038000,
+ FCVT_sh = FPDataProcessing1SourceFixed | 0x00C20000,
+ FCVT_dh = FPDataProcessing1SourceFixed | 0x00C28000,
+ FRINT32X_s = FPDataProcessing1SourceFixed | 0x00088000,
+ FRINT32X_d = FPDataProcessing1SourceFixed | FP64 | 0x00088000,
+ FRINT32X = FRINT32X_s,
+ FRINT32Z_s = FPDataProcessing1SourceFixed | 0x00080000,
+ FRINT32Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00080000,
+ FRINT32Z = FRINT32Z_s,
+ FRINT64X_s = FPDataProcessing1SourceFixed | 0x00098000,
+ FRINT64X_d = FPDataProcessing1SourceFixed | FP64 | 0x00098000,
+ FRINT64X = FRINT64X_s,
+ FRINT64Z_s = FPDataProcessing1SourceFixed | 0x00090000,
+ FRINT64Z_d = FPDataProcessing1SourceFixed | FP64 | 0x00090000,
+ FRINT64Z = FRINT64Z_s,
+ FRINTN_h = FPDataProcessing1SourceFixed | FP16 | 0x00040000,
+ FRINTN_s = FPDataProcessing1SourceFixed | 0x00040000,
+ FRINTN_d = FPDataProcessing1SourceFixed | FP64 | 0x00040000,
+ FRINTN = FRINTN_s,
+ FRINTP_h = FPDataProcessing1SourceFixed | FP16 | 0x00048000,
+ FRINTP_s = FPDataProcessing1SourceFixed | 0x00048000,
+ FRINTP_d = FPDataProcessing1SourceFixed | FP64 | 0x00048000,
+ FRINTP = FRINTP_s,
+ FRINTM_h = FPDataProcessing1SourceFixed | FP16 | 0x00050000,
+ FRINTM_s = FPDataProcessing1SourceFixed | 0x00050000,
+ FRINTM_d = FPDataProcessing1SourceFixed | FP64 | 0x00050000,
+ FRINTM = FRINTM_s,
+ FRINTZ_h = FPDataProcessing1SourceFixed | FP16 | 0x00058000,
+ FRINTZ_s = FPDataProcessing1SourceFixed | 0x00058000,
+ FRINTZ_d = FPDataProcessing1SourceFixed | FP64 | 0x00058000,
+ FRINTZ = FRINTZ_s,
+ FRINTA_h = FPDataProcessing1SourceFixed | FP16 | 0x00060000,
+ FRINTA_s = FPDataProcessing1SourceFixed | 0x00060000,
+ FRINTA_d = FPDataProcessing1SourceFixed | FP64 | 0x00060000,
+ FRINTA = FRINTA_s,
+ FRINTX_h = FPDataProcessing1SourceFixed | FP16 | 0x00070000,
+ FRINTX_s = FPDataProcessing1SourceFixed | 0x00070000,
+ FRINTX_d = FPDataProcessing1SourceFixed | FP64 | 0x00070000,
+ FRINTX = FRINTX_s,
+ FRINTI_h = FPDataProcessing1SourceFixed | FP16 | 0x00078000,
+ FRINTI_s = FPDataProcessing1SourceFixed | 0x00078000,
+ FRINTI_d = FPDataProcessing1SourceFixed | FP64 | 0x00078000,
+ FRINTI = FRINTI_s
+};
+
+// Floating point data processing 2 source.
+enum FPDataProcessing2SourceOp : uint32_t {
+ FPDataProcessing2SourceFixed = 0x1E200800,
+ FPDataProcessing2SourceFMask = 0x5F200C00,
+ FPDataProcessing2SourceMask = 0xFFE0FC00,
+ FMUL = FPDataProcessing2SourceFixed | 0x00000000,
+ FMUL_h = FMUL | FP16,
+ FMUL_s = FMUL,
+ FMUL_d = FMUL | FP64,
+ FDIV = FPDataProcessing2SourceFixed | 0x00001000,
+ FDIV_h = FDIV | FP16,
+ FDIV_s = FDIV,
+ FDIV_d = FDIV | FP64,
+ FADD = FPDataProcessing2SourceFixed | 0x00002000,
+ FADD_h = FADD | FP16,
+ FADD_s = FADD,
+ FADD_d = FADD | FP64,
+ FSUB = FPDataProcessing2SourceFixed | 0x00003000,
+ FSUB_h = FSUB | FP16,
+ FSUB_s = FSUB,
+ FSUB_d = FSUB | FP64,
+ FMAX = FPDataProcessing2SourceFixed | 0x00004000,
+ FMAX_h = FMAX | FP16,
+ FMAX_s = FMAX,
+ FMAX_d = FMAX | FP64,
+ FMIN = FPDataProcessing2SourceFixed | 0x00005000,
+ FMIN_h = FMIN | FP16,
+ FMIN_s = FMIN,
+ FMIN_d = FMIN | FP64,
+ FMAXNM = FPDataProcessing2SourceFixed | 0x00006000,
+ FMAXNM_h = FMAXNM | FP16,
+ FMAXNM_s = FMAXNM,
+ FMAXNM_d = FMAXNM | FP64,
+ FMINNM = FPDataProcessing2SourceFixed | 0x00007000,
+ FMINNM_h = FMINNM | FP16,
+ FMINNM_s = FMINNM,
+ FMINNM_d = FMINNM | FP64,
+ FNMUL = FPDataProcessing2SourceFixed | 0x00008000,
+ FNMUL_h = FNMUL | FP16,
+ FNMUL_s = FNMUL,
+ FNMUL_d = FNMUL | FP64
+};
+
+// Floating point data processing 3 source.
+enum FPDataProcessing3SourceOp : uint32_t {
+ FPDataProcessing3SourceFixed = 0x1F000000,
+ FPDataProcessing3SourceFMask = 0x5F000000,
+ FPDataProcessing3SourceMask = 0xFFE08000,
+ FMADD_h = FPDataProcessing3SourceFixed | 0x00C00000,
+ FMSUB_h = FPDataProcessing3SourceFixed | 0x00C08000,
+ FNMADD_h = FPDataProcessing3SourceFixed | 0x00E00000,
+ FNMSUB_h = FPDataProcessing3SourceFixed | 0x00E08000,
+ FMADD_s = FPDataProcessing3SourceFixed | 0x00000000,
+ FMSUB_s = FPDataProcessing3SourceFixed | 0x00008000,
+ FNMADD_s = FPDataProcessing3SourceFixed | 0x00200000,
+ FNMSUB_s = FPDataProcessing3SourceFixed | 0x00208000,
+ FMADD_d = FPDataProcessing3SourceFixed | 0x00400000,
+ FMSUB_d = FPDataProcessing3SourceFixed | 0x00408000,
+ FNMADD_d = FPDataProcessing3SourceFixed | 0x00600000,
+ FNMSUB_d = FPDataProcessing3SourceFixed | 0x00608000
+};
+
+// Conversion between floating point and integer.
+enum FPIntegerConvertOp : uint32_t {
+ FPIntegerConvertFixed = 0x1E200000,
+ FPIntegerConvertFMask = 0x5F20FC00,
+ FPIntegerConvertMask = 0xFFFFFC00,
+ FCVTNS = FPIntegerConvertFixed | 0x00000000,
+ FCVTNS_wh = FCVTNS | FP16,
+ FCVTNS_xh = FCVTNS | SixtyFourBits | FP16,
+ FCVTNS_ws = FCVTNS,
+ FCVTNS_xs = FCVTNS | SixtyFourBits,
+ FCVTNS_wd = FCVTNS | FP64,
+ FCVTNS_xd = FCVTNS | SixtyFourBits | FP64,
+ FCVTNU = FPIntegerConvertFixed | 0x00010000,
+ FCVTNU_wh = FCVTNU | FP16,
+ FCVTNU_xh = FCVTNU | SixtyFourBits | FP16,
+ FCVTNU_ws = FCVTNU,
+ FCVTNU_xs = FCVTNU | SixtyFourBits,
+ FCVTNU_wd = FCVTNU | FP64,
+ FCVTNU_xd = FCVTNU | SixtyFourBits | FP64,
+ FCVTPS = FPIntegerConvertFixed | 0x00080000,
+ FCVTPS_wh = FCVTPS | FP16,
+ FCVTPS_xh = FCVTPS | SixtyFourBits | FP16,
+ FCVTPS_ws = FCVTPS,
+ FCVTPS_xs = FCVTPS | SixtyFourBits,
+ FCVTPS_wd = FCVTPS | FP64,
+ FCVTPS_xd = FCVTPS | SixtyFourBits | FP64,
+ FCVTPU = FPIntegerConvertFixed | 0x00090000,
+ FCVTPU_wh = FCVTPU | FP16,
+ FCVTPU_xh = FCVTPU | SixtyFourBits | FP16,
+ FCVTPU_ws = FCVTPU,
+ FCVTPU_xs = FCVTPU | SixtyFourBits,
+ FCVTPU_wd = FCVTPU | FP64,
+ FCVTPU_xd = FCVTPU | SixtyFourBits | FP64,
+ FCVTMS = FPIntegerConvertFixed | 0x00100000,
+ FCVTMS_wh = FCVTMS | FP16,
+ FCVTMS_xh = FCVTMS | SixtyFourBits | FP16,
+ FCVTMS_ws = FCVTMS,
+ FCVTMS_xs = FCVTMS | SixtyFourBits,
+ FCVTMS_wd = FCVTMS | FP64,
+ FCVTMS_xd = FCVTMS | SixtyFourBits | FP64,
+ FCVTMU = FPIntegerConvertFixed | 0x00110000,
+ FCVTMU_wh = FCVTMU | FP16,
+ FCVTMU_xh = FCVTMU | SixtyFourBits | FP16,
+ FCVTMU_ws = FCVTMU,
+ FCVTMU_xs = FCVTMU | SixtyFourBits,
+ FCVTMU_wd = FCVTMU | FP64,
+ FCVTMU_xd = FCVTMU | SixtyFourBits | FP64,
+ FCVTZS = FPIntegerConvertFixed | 0x00180000,
+ FCVTZS_wh = FCVTZS | FP16,
+ FCVTZS_xh = FCVTZS | SixtyFourBits | FP16,
+ FCVTZS_ws = FCVTZS,
+ FCVTZS_xs = FCVTZS | SixtyFourBits,
+ FCVTZS_wd = FCVTZS | FP64,
+ FCVTZS_xd = FCVTZS | SixtyFourBits | FP64,
+ FCVTZU = FPIntegerConvertFixed | 0x00190000,
+ FCVTZU_wh = FCVTZU | FP16,
+ FCVTZU_xh = FCVTZU | SixtyFourBits | FP16,
+ FCVTZU_ws = FCVTZU,
+ FCVTZU_xs = FCVTZU | SixtyFourBits,
+ FCVTZU_wd = FCVTZU | FP64,
+ FCVTZU_xd = FCVTZU | SixtyFourBits | FP64,
+ SCVTF = FPIntegerConvertFixed | 0x00020000,
+ SCVTF_hw = SCVTF | FP16,
+ SCVTF_hx = SCVTF | SixtyFourBits | FP16,
+ SCVTF_sw = SCVTF,
+ SCVTF_sx = SCVTF | SixtyFourBits,
+ SCVTF_dw = SCVTF | FP64,
+ SCVTF_dx = SCVTF | SixtyFourBits | FP64,
+ UCVTF = FPIntegerConvertFixed | 0x00030000,
+ UCVTF_hw = UCVTF | FP16,
+ UCVTF_hx = UCVTF | SixtyFourBits | FP16,
+ UCVTF_sw = UCVTF,
+ UCVTF_sx = UCVTF | SixtyFourBits,
+ UCVTF_dw = UCVTF | FP64,
+ UCVTF_dx = UCVTF | SixtyFourBits | FP64,
+ FCVTAS = FPIntegerConvertFixed | 0x00040000,
+ FCVTAS_wh = FCVTAS | FP16,
+ FCVTAS_xh = FCVTAS | SixtyFourBits | FP16,
+ FCVTAS_ws = FCVTAS,
+ FCVTAS_xs = FCVTAS | SixtyFourBits,
+ FCVTAS_wd = FCVTAS | FP64,
+ FCVTAS_xd = FCVTAS | SixtyFourBits | FP64,
+ FCVTAU = FPIntegerConvertFixed | 0x00050000,
+ FCVTAU_wh = FCVTAU | FP16,
+ FCVTAU_xh = FCVTAU | SixtyFourBits | FP16,
+ FCVTAU_ws = FCVTAU,
+ FCVTAU_xs = FCVTAU | SixtyFourBits,
+ FCVTAU_wd = FCVTAU | FP64,
+ FCVTAU_xd = FCVTAU | SixtyFourBits | FP64,
+ FMOV_wh = FPIntegerConvertFixed | 0x00060000 | FP16,
+ FMOV_hw = FPIntegerConvertFixed | 0x00070000 | FP16,
+ FMOV_xh = FMOV_wh | SixtyFourBits,
+ FMOV_hx = FMOV_hw | SixtyFourBits,
+ FMOV_ws = FPIntegerConvertFixed | 0x00060000,
+ FMOV_sw = FPIntegerConvertFixed | 0x00070000,
+ FMOV_xd = FMOV_ws | SixtyFourBits | FP64,
+ FMOV_dx = FMOV_sw | SixtyFourBits | FP64,
+ FMOV_d1_x = FPIntegerConvertFixed | SixtyFourBits | 0x008F0000,
+ FMOV_x_d1 = FPIntegerConvertFixed | SixtyFourBits | 0x008E0000,
+ FJCVTZS = FPIntegerConvertFixed | FP64 | 0x001E0000
+};
+
+// Conversion between fixed point and floating point.
+enum FPFixedPointConvertOp : uint32_t {
+ FPFixedPointConvertFixed = 0x1E000000,
+ FPFixedPointConvertFMask = 0x5F200000,
+ FPFixedPointConvertMask = 0xFFFF0000,
+ FCVTZS_fixed = FPFixedPointConvertFixed | 0x00180000,
+ FCVTZS_wh_fixed = FCVTZS_fixed | FP16,
+ FCVTZS_xh_fixed = FCVTZS_fixed | SixtyFourBits | FP16,
+ FCVTZS_ws_fixed = FCVTZS_fixed,
+ FCVTZS_xs_fixed = FCVTZS_fixed | SixtyFourBits,
+ FCVTZS_wd_fixed = FCVTZS_fixed | FP64,
+ FCVTZS_xd_fixed = FCVTZS_fixed | SixtyFourBits | FP64,
+ FCVTZU_fixed = FPFixedPointConvertFixed | 0x00190000,
+ FCVTZU_wh_fixed = FCVTZU_fixed | FP16,
+ FCVTZU_xh_fixed = FCVTZU_fixed | SixtyFourBits | FP16,
+ FCVTZU_ws_fixed = FCVTZU_fixed,
+ FCVTZU_xs_fixed = FCVTZU_fixed | SixtyFourBits,
+ FCVTZU_wd_fixed = FCVTZU_fixed | FP64,
+ FCVTZU_xd_fixed = FCVTZU_fixed | SixtyFourBits | FP64,
+ SCVTF_fixed = FPFixedPointConvertFixed | 0x00020000,
+ SCVTF_hw_fixed = SCVTF_fixed | FP16,
+ SCVTF_hx_fixed = SCVTF_fixed | SixtyFourBits | FP16,
+ SCVTF_sw_fixed = SCVTF_fixed,
+ SCVTF_sx_fixed = SCVTF_fixed | SixtyFourBits,
+ SCVTF_dw_fixed = SCVTF_fixed | FP64,
+ SCVTF_dx_fixed = SCVTF_fixed | SixtyFourBits | FP64,
+ UCVTF_fixed = FPFixedPointConvertFixed | 0x00030000,
+ UCVTF_hw_fixed = UCVTF_fixed | FP16,
+ UCVTF_hx_fixed = UCVTF_fixed | SixtyFourBits | FP16,
+ UCVTF_sw_fixed = UCVTF_fixed,
+ UCVTF_sx_fixed = UCVTF_fixed | SixtyFourBits,
+ UCVTF_dw_fixed = UCVTF_fixed | FP64,
+ UCVTF_dx_fixed = UCVTF_fixed | SixtyFourBits | FP64
+};
+
+// Crypto - two register SHA.
+enum Crypto2RegSHAOp : uint32_t {
+ Crypto2RegSHAFixed = 0x5E280800,
+ Crypto2RegSHAFMask = 0xFF3E0C00
+};
+
+// Crypto - three register SHA.
+enum Crypto3RegSHAOp : uint32_t {
+ Crypto3RegSHAFixed = 0x5E000000,
+ Crypto3RegSHAFMask = 0xFF208C00
+};
+
+// Crypto - AES.
+enum CryptoAESOp : uint32_t {
+ CryptoAESFixed = 0x4E280800,
+ CryptoAESFMask = 0xFF3E0C00
+};
+
+// NEON instructions with two register operands.
+enum NEON2RegMiscOp : uint32_t {
+ NEON2RegMiscFixed = 0x0E200800,
+ NEON2RegMiscFMask = 0x9F3E0C00,
+ NEON2RegMiscMask = 0xBF3FFC00,
+ NEON2RegMiscUBit = 0x20000000,
+ NEON_REV64 = NEON2RegMiscFixed | 0x00000000,
+ NEON_REV32 = NEON2RegMiscFixed | 0x20000000,
+ NEON_REV16 = NEON2RegMiscFixed | 0x00001000,
+ NEON_SADDLP = NEON2RegMiscFixed | 0x00002000,
+ NEON_UADDLP = NEON_SADDLP | NEON2RegMiscUBit,
+ NEON_SUQADD = NEON2RegMiscFixed | 0x00003000,
+ NEON_USQADD = NEON_SUQADD | NEON2RegMiscUBit,
+ NEON_CLS = NEON2RegMiscFixed | 0x00004000,
+ NEON_CLZ = NEON2RegMiscFixed | 0x20004000,
+ NEON_CNT = NEON2RegMiscFixed | 0x00005000,
+ NEON_RBIT_NOT = NEON2RegMiscFixed | 0x20005000,
+ NEON_SADALP = NEON2RegMiscFixed | 0x00006000,
+ NEON_UADALP = NEON_SADALP | NEON2RegMiscUBit,
+ NEON_SQABS = NEON2RegMiscFixed | 0x00007000,
+ NEON_SQNEG = NEON2RegMiscFixed | 0x20007000,
+ NEON_CMGT_zero = NEON2RegMiscFixed | 0x00008000,
+ NEON_CMGE_zero = NEON2RegMiscFixed | 0x20008000,
+ NEON_CMEQ_zero = NEON2RegMiscFixed | 0x00009000,
+ NEON_CMLE_zero = NEON2RegMiscFixed | 0x20009000,
+ NEON_CMLT_zero = NEON2RegMiscFixed | 0x0000A000,
+ NEON_ABS = NEON2RegMiscFixed | 0x0000B000,
+ NEON_NEG = NEON2RegMiscFixed | 0x2000B000,
+ NEON_XTN = NEON2RegMiscFixed | 0x00012000,
+ NEON_SQXTUN = NEON2RegMiscFixed | 0x20012000,
+ NEON_SHLL = NEON2RegMiscFixed | 0x20013000,
+ NEON_SQXTN = NEON2RegMiscFixed | 0x00014000,
+ NEON_UQXTN = NEON_SQXTN | NEON2RegMiscUBit,
+
+ NEON2RegMiscOpcode = 0x0001F000,
+ NEON_RBIT_NOT_opcode = NEON_RBIT_NOT & NEON2RegMiscOpcode,
+ NEON_NEG_opcode = NEON_NEG & NEON2RegMiscOpcode,
+ NEON_XTN_opcode = NEON_XTN & NEON2RegMiscOpcode,
+ NEON_UQXTN_opcode = NEON_UQXTN & NEON2RegMiscOpcode,
+
+ // These instructions use only one bit of the size field. The other bit is
+ // used to distinguish between instructions.
+ NEON2RegMiscFPMask = NEON2RegMiscMask | 0x00800000,
+ NEON_FABS = NEON2RegMiscFixed | 0x0080F000,
+ NEON_FNEG = NEON2RegMiscFixed | 0x2080F000,
+ NEON_FCVTN = NEON2RegMiscFixed | 0x00016000,
+ NEON_FCVTXN = NEON2RegMiscFixed | 0x20016000,
+ NEON_FCVTL = NEON2RegMiscFixed | 0x00017000,
+ NEON_FRINT32X = NEON2RegMiscFixed | 0x2001E000,
+ NEON_FRINT32Z = NEON2RegMiscFixed | 0x0001E000,
+ NEON_FRINT64X = NEON2RegMiscFixed | 0x2001F000,
+ NEON_FRINT64Z = NEON2RegMiscFixed | 0x0001F000,
+ NEON_FRINTN = NEON2RegMiscFixed | 0x00018000,
+ NEON_FRINTA = NEON2RegMiscFixed | 0x20018000,
+ NEON_FRINTP = NEON2RegMiscFixed | 0x00818000,
+ NEON_FRINTM = NEON2RegMiscFixed | 0x00019000,
+ NEON_FRINTX = NEON2RegMiscFixed | 0x20019000,
+ NEON_FRINTZ = NEON2RegMiscFixed | 0x00819000,
+ NEON_FRINTI = NEON2RegMiscFixed | 0x20819000,
+ NEON_FCVTNS = NEON2RegMiscFixed | 0x0001A000,
+ NEON_FCVTNU = NEON_FCVTNS | NEON2RegMiscUBit,
+ NEON_FCVTPS = NEON2RegMiscFixed | 0x0081A000,
+ NEON_FCVTPU = NEON_FCVTPS | NEON2RegMiscUBit,
+ NEON_FCVTMS = NEON2RegMiscFixed | 0x0001B000,
+ NEON_FCVTMU = NEON_FCVTMS | NEON2RegMiscUBit,
+ NEON_FCVTZS = NEON2RegMiscFixed | 0x0081B000,
+ NEON_FCVTZU = NEON_FCVTZS | NEON2RegMiscUBit,
+ NEON_FCVTAS = NEON2RegMiscFixed | 0x0001C000,
+ NEON_FCVTAU = NEON_FCVTAS | NEON2RegMiscUBit,
+ NEON_FSQRT = NEON2RegMiscFixed | 0x2081F000,
+ NEON_SCVTF = NEON2RegMiscFixed | 0x0001D000,
+ NEON_UCVTF = NEON_SCVTF | NEON2RegMiscUBit,
+ NEON_URSQRTE = NEON2RegMiscFixed | 0x2081C000,
+ NEON_URECPE = NEON2RegMiscFixed | 0x0081C000,
+ NEON_FRSQRTE = NEON2RegMiscFixed | 0x2081D000,
+ NEON_FRECPE = NEON2RegMiscFixed | 0x0081D000,
+ NEON_FCMGT_zero = NEON2RegMiscFixed | 0x0080C000,
+ NEON_FCMGE_zero = NEON2RegMiscFixed | 0x2080C000,
+ NEON_FCMEQ_zero = NEON2RegMiscFixed | 0x0080D000,
+ NEON_FCMLE_zero = NEON2RegMiscFixed | 0x2080D000,
+ NEON_FCMLT_zero = NEON2RegMiscFixed | 0x0080E000,
+
+ NEON_FCVTL_opcode = NEON_FCVTL & NEON2RegMiscOpcode,
+ NEON_FCVTN_opcode = NEON_FCVTN & NEON2RegMiscOpcode
+};
+
+// NEON instructions with two register operands (FP16).
+enum NEON2RegMiscFP16Op : uint32_t {
+ NEON2RegMiscFP16Fixed = 0x0E780800,
+ NEON2RegMiscFP16FMask = 0x9F7E0C00,
+ NEON2RegMiscFP16Mask = 0xBFFFFC00,
+ NEON_FRINTN_H = NEON2RegMiscFP16Fixed | 0x00018000,
+ NEON_FRINTM_H = NEON2RegMiscFP16Fixed | 0x00019000,
+ NEON_FCVTNS_H = NEON2RegMiscFP16Fixed | 0x0001A000,
+ NEON_FCVTMS_H = NEON2RegMiscFP16Fixed | 0x0001B000,
+ NEON_FCVTAS_H = NEON2RegMiscFP16Fixed | 0x0001C000,
+ NEON_SCVTF_H = NEON2RegMiscFP16Fixed | 0x0001D000,
+ NEON_FCMGT_H_zero = NEON2RegMiscFP16Fixed | 0x0080C000,
+ NEON_FCMEQ_H_zero = NEON2RegMiscFP16Fixed | 0x0080D000,
+ NEON_FCMLT_H_zero = NEON2RegMiscFP16Fixed | 0x0080E000,
+ NEON_FABS_H = NEON2RegMiscFP16Fixed | 0x0080F000,
+ NEON_FRINTP_H = NEON2RegMiscFP16Fixed | 0x00818000,
+ NEON_FRINTZ_H = NEON2RegMiscFP16Fixed | 0x00819000,
+ NEON_FCVTPS_H = NEON2RegMiscFP16Fixed | 0x0081A000,
+ NEON_FCVTZS_H = NEON2RegMiscFP16Fixed | 0x0081B000,
+ NEON_FRECPE_H = NEON2RegMiscFP16Fixed | 0x0081D000,
+ NEON_FRINTA_H = NEON2RegMiscFP16Fixed | 0x20018000,
+ NEON_FRINTX_H = NEON2RegMiscFP16Fixed | 0x20019000,
+ NEON_FCVTNU_H = NEON2RegMiscFP16Fixed | 0x2001A000,
+ NEON_FCVTMU_H = NEON2RegMiscFP16Fixed | 0x2001B000,
+ NEON_FCVTAU_H = NEON2RegMiscFP16Fixed | 0x2001C000,
+ NEON_UCVTF_H = NEON2RegMiscFP16Fixed | 0x2001D000,
+ NEON_FCMGE_H_zero = NEON2RegMiscFP16Fixed | 0x2080C000,
+ NEON_FCMLE_H_zero = NEON2RegMiscFP16Fixed | 0x2080D000,
+ NEON_FNEG_H = NEON2RegMiscFP16Fixed | 0x2080F000,
+ NEON_FRINTI_H = NEON2RegMiscFP16Fixed | 0x20819000,
+ NEON_FCVTPU_H = NEON2RegMiscFP16Fixed | 0x2081A000,
+ NEON_FCVTZU_H = NEON2RegMiscFP16Fixed | 0x2081B000,
+ NEON_FRSQRTE_H = NEON2RegMiscFP16Fixed | 0x2081D000,
+ NEON_FSQRT_H = NEON2RegMiscFP16Fixed | 0x2081F000
+};
+
+// NEON instructions with three same-type operands.
+enum NEON3SameOp : uint32_t {
+ NEON3SameFixed = 0x0E200400,
+ NEON3SameFMask = 0x9F200400,
+ NEON3SameMask = 0xBF20FC00,
+ NEON3SameUBit = 0x20000000,
+ NEON_ADD = NEON3SameFixed | 0x00008000,
+ NEON_ADDP = NEON3SameFixed | 0x0000B800,
+ NEON_SHADD = NEON3SameFixed | 0x00000000,
+ NEON_SHSUB = NEON3SameFixed | 0x00002000,
+ NEON_SRHADD = NEON3SameFixed | 0x00001000,
+ NEON_CMEQ = NEON3SameFixed | NEON3SameUBit | 0x00008800,
+ NEON_CMGE = NEON3SameFixed | 0x00003800,
+ NEON_CMGT = NEON3SameFixed | 0x00003000,
+ NEON_CMHI = NEON3SameFixed | NEON3SameUBit | NEON_CMGT,
+ NEON_CMHS = NEON3SameFixed | NEON3SameUBit | NEON_CMGE,
+ NEON_CMTST = NEON3SameFixed | 0x00008800,
+ NEON_MLA = NEON3SameFixed | 0x00009000,
+ NEON_MLS = NEON3SameFixed | 0x20009000,
+ NEON_MUL = NEON3SameFixed | 0x00009800,
+ NEON_PMUL = NEON3SameFixed | 0x20009800,
+ NEON_SRSHL = NEON3SameFixed | 0x00005000,
+ NEON_SQSHL = NEON3SameFixed | 0x00004800,
+ NEON_SQRSHL = NEON3SameFixed | 0x00005800,
+ NEON_SSHL = NEON3SameFixed | 0x00004000,
+ NEON_SMAX = NEON3SameFixed | 0x00006000,
+ NEON_SMAXP = NEON3SameFixed | 0x0000A000,
+ NEON_SMIN = NEON3SameFixed | 0x00006800,
+ NEON_SMINP = NEON3SameFixed | 0x0000A800,
+ NEON_SABD = NEON3SameFixed | 0x00007000,
+ NEON_SABA = NEON3SameFixed | 0x00007800,
+ NEON_UABD = NEON3SameFixed | NEON3SameUBit | NEON_SABD,
+ NEON_UABA = NEON3SameFixed | NEON3SameUBit | NEON_SABA,
+ NEON_SQADD = NEON3SameFixed | 0x00000800,
+ NEON_SQSUB = NEON3SameFixed | 0x00002800,
+ NEON_SUB = NEON3SameFixed | NEON3SameUBit | 0x00008000,
+ NEON_UHADD = NEON3SameFixed | NEON3SameUBit | NEON_SHADD,
+ NEON_UHSUB = NEON3SameFixed | NEON3SameUBit | NEON_SHSUB,
+ NEON_URHADD = NEON3SameFixed | NEON3SameUBit | NEON_SRHADD,
+ NEON_UMAX = NEON3SameFixed | NEON3SameUBit | NEON_SMAX,
+ NEON_UMAXP = NEON3SameFixed | NEON3SameUBit | NEON_SMAXP,
+ NEON_UMIN = NEON3SameFixed | NEON3SameUBit | NEON_SMIN,
+ NEON_UMINP = NEON3SameFixed | NEON3SameUBit | NEON_SMINP,
+ NEON_URSHL = NEON3SameFixed | NEON3SameUBit | NEON_SRSHL,
+ NEON_UQADD = NEON3SameFixed | NEON3SameUBit | NEON_SQADD,
+ NEON_UQRSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQRSHL,
+ NEON_UQSHL = NEON3SameFixed | NEON3SameUBit | NEON_SQSHL,
+ NEON_UQSUB = NEON3SameFixed | NEON3SameUBit | NEON_SQSUB,
+ NEON_USHL = NEON3SameFixed | NEON3SameUBit | NEON_SSHL,
+ NEON_SQDMULH = NEON3SameFixed | 0x0000B000,
+ NEON_SQRDMULH = NEON3SameFixed | 0x2000B000,
+
+ // NEON floating point instructions with three same-type operands.
+ NEON3SameFPFixed = NEON3SameFixed | 0x0000C000,
+ NEON3SameFPFMask = NEON3SameFMask | 0x0000C000,
+ NEON3SameFPMask = NEON3SameMask | 0x00800000,
+ NEON_FADD = NEON3SameFixed | 0x0000D000,
+ NEON_FSUB = NEON3SameFixed | 0x0080D000,
+ NEON_FMUL = NEON3SameFixed | 0x2000D800,
+ NEON_FDIV = NEON3SameFixed | 0x2000F800,
+ NEON_FMAX = NEON3SameFixed | 0x0000F000,
+ NEON_FMAXNM = NEON3SameFixed | 0x0000C000,
+ NEON_FMAXP = NEON3SameFixed | 0x2000F000,
+ NEON_FMAXNMP = NEON3SameFixed | 0x2000C000,
+ NEON_FMIN = NEON3SameFixed | 0x0080F000,
+ NEON_FMINNM = NEON3SameFixed | 0x0080C000,
+ NEON_FMINP = NEON3SameFixed | 0x2080F000,
+ NEON_FMINNMP = NEON3SameFixed | 0x2080C000,
+ NEON_FMLA = NEON3SameFixed | 0x0000C800,
+ NEON_FMLS = NEON3SameFixed | 0x0080C800,
+ NEON_FMULX = NEON3SameFixed | 0x0000D800,
+ NEON_FRECPS = NEON3SameFixed | 0x0000F800,
+ NEON_FRSQRTS = NEON3SameFixed | 0x0080F800,
+ NEON_FABD = NEON3SameFixed | 0x2080D000,
+ NEON_FADDP = NEON3SameFixed | 0x2000D000,
+ NEON_FCMEQ = NEON3SameFixed | 0x0000E000,
+ NEON_FCMGE = NEON3SameFixed | 0x2000E000,
+ NEON_FCMGT = NEON3SameFixed | 0x2080E000,
+ NEON_FACGE = NEON3SameFixed | 0x2000E800,
+ NEON_FACGT = NEON3SameFixed | 0x2080E800,
+
+ // NEON logical instructions with three same-type operands.
+ NEON3SameLogicalFixed = NEON3SameFixed | 0x00001800,
+ NEON3SameLogicalFMask = NEON3SameFMask | 0x0000F800,
+ NEON3SameLogicalMask = 0xBFE0FC00,
+ NEON3SameLogicalFormatMask = NEON_Q,
+ NEON_AND = NEON3SameLogicalFixed | 0x00000000,
+ NEON_ORR = NEON3SameLogicalFixed | 0x00A00000,
+ NEON_ORN = NEON3SameLogicalFixed | 0x00C00000,
+ NEON_EOR = NEON3SameLogicalFixed | 0x20000000,
+ NEON_BIC = NEON3SameLogicalFixed | 0x00400000,
+ NEON_BIF = NEON3SameLogicalFixed | 0x20C00000,
+ NEON_BIT = NEON3SameLogicalFixed | 0x20800000,
+ NEON_BSL = NEON3SameLogicalFixed | 0x20400000,
+
+  // FHM (FMLAL-like) instructions have an oddball encoding scheme under 3Same.
+  // NEON3SameFHMMask covers the U, size and opcode fields; the trailing
+  // comments below give those bits for each encoding.
+  NEON3SameFHMMask = 0xBFE0FC00,
+ NEON_FMLAL = NEON3SameFixed | 0x0000E800, // 0 00 11101
+ NEON_FMLAL2 = NEON3SameFixed | 0x2000C800, // 1 00 11001
+ NEON_FMLSL = NEON3SameFixed | 0x0080E800, // 0 10 11101
+ NEON_FMLSL2 = NEON3SameFixed | 0x2080C800 // 1 10 11001
+};
+
+enum NEON3SameFP16 : uint32_t {
+ NEON3SameFP16Fixed = 0x0E400400,
+ NEON3SameFP16FMask = 0x9F60C400,
+ NEON3SameFP16Mask = 0xBFE0FC00,
+ NEON_FMAXNM_H = NEON3SameFP16Fixed | 0x00000000,
+ NEON_FMLA_H = NEON3SameFP16Fixed | 0x00000800,
+ NEON_FADD_H = NEON3SameFP16Fixed | 0x00001000,
+ NEON_FMULX_H = NEON3SameFP16Fixed | 0x00001800,
+ NEON_FCMEQ_H = NEON3SameFP16Fixed | 0x00002000,
+ NEON_FMAX_H = NEON3SameFP16Fixed | 0x00003000,
+ NEON_FRECPS_H = NEON3SameFP16Fixed | 0x00003800,
+ NEON_FMINNM_H = NEON3SameFP16Fixed | 0x00800000,
+ NEON_FMLS_H = NEON3SameFP16Fixed | 0x00800800,
+ NEON_FSUB_H = NEON3SameFP16Fixed | 0x00801000,
+ NEON_FMIN_H = NEON3SameFP16Fixed | 0x00803000,
+ NEON_FRSQRTS_H = NEON3SameFP16Fixed | 0x00803800,
+ NEON_FMAXNMP_H = NEON3SameFP16Fixed | 0x20000000,
+ NEON_FADDP_H = NEON3SameFP16Fixed | 0x20001000,
+ NEON_FMUL_H = NEON3SameFP16Fixed | 0x20001800,
+ NEON_FCMGE_H = NEON3SameFP16Fixed | 0x20002000,
+ NEON_FACGE_H = NEON3SameFP16Fixed | 0x20002800,
+ NEON_FMAXP_H = NEON3SameFP16Fixed | 0x20003000,
+ NEON_FDIV_H = NEON3SameFP16Fixed | 0x20003800,
+ NEON_FMINNMP_H = NEON3SameFP16Fixed | 0x20800000,
+ NEON_FABD_H = NEON3SameFP16Fixed | 0x20801000,
+ NEON_FCMGT_H = NEON3SameFP16Fixed | 0x20802000,
+ NEON_FACGT_H = NEON3SameFP16Fixed | 0x20802800,
+ NEON_FMINP_H = NEON3SameFP16Fixed | 0x20803000
+};
+
+// 'Extra' NEON instructions with three same-type operands.
+enum NEON3SameExtraOp : uint32_t {
+ NEON3SameExtraFixed = 0x0E008400,
+ NEON3SameExtraUBit = 0x20000000,
+ NEON3SameExtraFMask = 0x9E208400,
+ NEON3SameExtraMask = 0xBE20FC00,
+ NEON_SQRDMLAH = NEON3SameExtraFixed | NEON3SameExtraUBit,
+ NEON_SQRDMLSH = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00000800,
+ NEON_SDOT = NEON3SameExtraFixed | 0x00001000,
+ NEON_UDOT = NEON3SameExtraFixed | NEON3SameExtraUBit | 0x00001000,
+
+  // v8.3 complex-number instructions (FCMLA, FCADD).
+ NEON3SameExtraFCFixed = 0x2E00C400,
+ NEON3SameExtraFCFMask = 0xBF20C400,
+ // FCMLA fixes opcode<3:2>, and uses opcode<1:0> to encode <rotate>.
+ NEON3SameExtraFCMLAMask = NEON3SameExtraFCFMask | 0x00006000,
+ NEON_FCMLA = NEON3SameExtraFCFixed,
+ // FCADD fixes opcode<3:2, 0>, and uses opcode<1> to encode <rotate>.
+ NEON3SameExtraFCADDMask = NEON3SameExtraFCFMask | 0x00006800,
+ NEON_FCADD = NEON3SameExtraFCFixed | 0x00002000
+ // Other encodings under NEON3SameExtraFCFMask are UNALLOCATED.
+};
+
+// NEON instructions with three different-type operands.
+enum NEON3DifferentOp : uint32_t {
+ NEON3DifferentFixed = 0x0E200000,
+ NEON3DifferentFMask = 0x9F200C00,
+ NEON3DifferentMask = 0xFF20FC00,
+ NEON_ADDHN = NEON3DifferentFixed | 0x00004000,
+ NEON_ADDHN2 = NEON_ADDHN | NEON_Q,
+ NEON_PMULL = NEON3DifferentFixed | 0x0000E000,
+ NEON_PMULL2 = NEON_PMULL | NEON_Q,
+ NEON_RADDHN = NEON3DifferentFixed | 0x20004000,
+ NEON_RADDHN2 = NEON_RADDHN | NEON_Q,
+ NEON_RSUBHN = NEON3DifferentFixed | 0x20006000,
+ NEON_RSUBHN2 = NEON_RSUBHN | NEON_Q,
+ NEON_SABAL = NEON3DifferentFixed | 0x00005000,
+ NEON_SABAL2 = NEON_SABAL | NEON_Q,
+ NEON_SABDL = NEON3DifferentFixed | 0x00007000,
+ NEON_SABDL2 = NEON_SABDL | NEON_Q,
+ NEON_SADDL = NEON3DifferentFixed | 0x00000000,
+ NEON_SADDL2 = NEON_SADDL | NEON_Q,
+ NEON_SADDW = NEON3DifferentFixed | 0x00001000,
+ NEON_SADDW2 = NEON_SADDW | NEON_Q,
+ NEON_SMLAL = NEON3DifferentFixed | 0x00008000,
+ NEON_SMLAL2 = NEON_SMLAL | NEON_Q,
+ NEON_SMLSL = NEON3DifferentFixed | 0x0000A000,
+ NEON_SMLSL2 = NEON_SMLSL | NEON_Q,
+ NEON_SMULL = NEON3DifferentFixed | 0x0000C000,
+ NEON_SMULL2 = NEON_SMULL | NEON_Q,
+ NEON_SSUBL = NEON3DifferentFixed | 0x00002000,
+ NEON_SSUBL2 = NEON_SSUBL | NEON_Q,
+ NEON_SSUBW = NEON3DifferentFixed | 0x00003000,
+ NEON_SSUBW2 = NEON_SSUBW | NEON_Q,
+ NEON_SQDMLAL = NEON3DifferentFixed | 0x00009000,
+ NEON_SQDMLAL2 = NEON_SQDMLAL | NEON_Q,
+ NEON_SQDMLSL = NEON3DifferentFixed | 0x0000B000,
+ NEON_SQDMLSL2 = NEON_SQDMLSL | NEON_Q,
+ NEON_SQDMULL = NEON3DifferentFixed | 0x0000D000,
+ NEON_SQDMULL2 = NEON_SQDMULL | NEON_Q,
+ NEON_SUBHN = NEON3DifferentFixed | 0x00006000,
+ NEON_SUBHN2 = NEON_SUBHN | NEON_Q,
+ NEON_UABAL = NEON_SABAL | NEON3SameUBit,
+ NEON_UABAL2 = NEON_UABAL | NEON_Q,
+ NEON_UABDL = NEON_SABDL | NEON3SameUBit,
+ NEON_UABDL2 = NEON_UABDL | NEON_Q,
+ NEON_UADDL = NEON_SADDL | NEON3SameUBit,
+ NEON_UADDL2 = NEON_UADDL | NEON_Q,
+ NEON_UADDW = NEON_SADDW | NEON3SameUBit,
+ NEON_UADDW2 = NEON_UADDW | NEON_Q,
+ NEON_UMLAL = NEON_SMLAL | NEON3SameUBit,
+ NEON_UMLAL2 = NEON_UMLAL | NEON_Q,
+ NEON_UMLSL = NEON_SMLSL | NEON3SameUBit,
+ NEON_UMLSL2 = NEON_UMLSL | NEON_Q,
+ NEON_UMULL = NEON_SMULL | NEON3SameUBit,
+ NEON_UMULL2 = NEON_UMULL | NEON_Q,
+ NEON_USUBL = NEON_SSUBL | NEON3SameUBit,
+ NEON_USUBL2 = NEON_USUBL | NEON_Q,
+ NEON_USUBW = NEON_SSUBW | NEON3SameUBit,
+ NEON_USUBW2 = NEON_USUBW | NEON_Q
+};
+
+// NEON instructions operating across vectors.
+enum NEONAcrossLanesOp : uint32_t {
+ NEONAcrossLanesFixed = 0x0E300800,
+ NEONAcrossLanesFMask = 0x9F3E0C00,
+ NEONAcrossLanesMask = 0xBF3FFC00,
+ NEON_ADDV = NEONAcrossLanesFixed | 0x0001B000,
+ NEON_SADDLV = NEONAcrossLanesFixed | 0x00003000,
+ NEON_UADDLV = NEONAcrossLanesFixed | 0x20003000,
+ NEON_SMAXV = NEONAcrossLanesFixed | 0x0000A000,
+ NEON_SMINV = NEONAcrossLanesFixed | 0x0001A000,
+ NEON_UMAXV = NEONAcrossLanesFixed | 0x2000A000,
+ NEON_UMINV = NEONAcrossLanesFixed | 0x2001A000,
+
+ NEONAcrossLanesFP16Fixed = NEONAcrossLanesFixed | 0x0000C000,
+ NEONAcrossLanesFP16FMask = NEONAcrossLanesFMask | 0x2000C000,
+ NEONAcrossLanesFP16Mask = NEONAcrossLanesMask | 0x20800000,
+ NEON_FMAXNMV_H = NEONAcrossLanesFP16Fixed | 0x00000000,
+ NEON_FMAXV_H = NEONAcrossLanesFP16Fixed | 0x00003000,
+ NEON_FMINNMV_H = NEONAcrossLanesFP16Fixed | 0x00800000,
+ NEON_FMINV_H = NEONAcrossLanesFP16Fixed | 0x00803000,
+
+  // NEON floating-point across-lanes instructions.
+ NEONAcrossLanesFPFixed = NEONAcrossLanesFixed | 0x2000C000,
+ NEONAcrossLanesFPFMask = NEONAcrossLanesFMask | 0x2000C000,
+ NEONAcrossLanesFPMask = NEONAcrossLanesMask | 0x20800000,
+
+ NEON_FMAXV = NEONAcrossLanesFPFixed | 0x2000F000,
+ NEON_FMINV = NEONAcrossLanesFPFixed | 0x2080F000,
+ NEON_FMAXNMV = NEONAcrossLanesFPFixed | 0x2000C000,
+ NEON_FMINNMV = NEONAcrossLanesFPFixed | 0x2080C000
+};
+
+// NEON instructions with indexed element operand.
+enum NEONByIndexedElementOp : uint32_t {
+ NEONByIndexedElementFixed = 0x0F000000,
+ NEONByIndexedElementFMask = 0x9F000400,
+ NEONByIndexedElementMask = 0xBF00F400,
+ NEON_MUL_byelement = NEONByIndexedElementFixed | 0x00008000,
+ NEON_MLA_byelement = NEONByIndexedElementFixed | 0x20000000,
+ NEON_MLS_byelement = NEONByIndexedElementFixed | 0x20004000,
+ NEON_SMULL_byelement = NEONByIndexedElementFixed | 0x0000A000,
+ NEON_SMLAL_byelement = NEONByIndexedElementFixed | 0x00002000,
+ NEON_SMLSL_byelement = NEONByIndexedElementFixed | 0x00006000,
+ NEON_UMULL_byelement = NEONByIndexedElementFixed | 0x2000A000,
+ NEON_UMLAL_byelement = NEONByIndexedElementFixed | 0x20002000,
+ NEON_UMLSL_byelement = NEONByIndexedElementFixed | 0x20006000,
+ NEON_SQDMULL_byelement = NEONByIndexedElementFixed | 0x0000B000,
+ NEON_SQDMLAL_byelement = NEONByIndexedElementFixed | 0x00003000,
+ NEON_SQDMLSL_byelement = NEONByIndexedElementFixed | 0x00007000,
+ NEON_SQDMULH_byelement = NEONByIndexedElementFixed | 0x0000C000,
+ NEON_SQRDMULH_byelement = NEONByIndexedElementFixed | 0x0000D000,
+ NEON_SDOT_byelement = NEONByIndexedElementFixed | 0x0000E000,
+ NEON_SQRDMLAH_byelement = NEONByIndexedElementFixed | 0x2000D000,
+ NEON_UDOT_byelement = NEONByIndexedElementFixed | 0x2000E000,
+ NEON_SQRDMLSH_byelement = NEONByIndexedElementFixed | 0x2000F000,
+
+ NEON_FMLA_H_byelement = NEONByIndexedElementFixed | 0x00001000,
+ NEON_FMLS_H_byelement = NEONByIndexedElementFixed | 0x00005000,
+ NEON_FMUL_H_byelement = NEONByIndexedElementFixed | 0x00009000,
+ NEON_FMULX_H_byelement = NEONByIndexedElementFixed | 0x20009000,
+
+ // Floating point instructions.
+ NEONByIndexedElementFPFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPMask = NEONByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement = NEONByIndexedElementFPFixed | 0x00001000,
+ NEON_FMLS_byelement = NEONByIndexedElementFPFixed | 0x00005000,
+ NEON_FMUL_byelement = NEONByIndexedElementFPFixed | 0x00009000,
+ NEON_FMULX_byelement = NEONByIndexedElementFPFixed | 0x20009000,
+
+ // FMLAL-like instructions.
+ // For all cases: U = x, size = 10, opcode = xx00
+ NEONByIndexedElementFPLongFixed = NEONByIndexedElementFixed | 0x00800000,
+ NEONByIndexedElementFPLongFMask = NEONByIndexedElementFMask | 0x00C03000,
+ NEONByIndexedElementFPLongMask = 0xBFC0F400,
+ NEON_FMLAL_H_byelement = NEONByIndexedElementFixed | 0x00800000,
+ NEON_FMLAL2_H_byelement = NEONByIndexedElementFixed | 0x20808000,
+ NEON_FMLSL_H_byelement = NEONByIndexedElementFixed | 0x00804000,
+ NEON_FMLSL2_H_byelement = NEONByIndexedElementFixed | 0x2080C000,
+
+  // Complex number instruction(s).
+  // A dedicated mask is necessary because the 'rot' encoding moves into the
+  // NEONByIndexedElementMask space.
+ NEONByIndexedElementFPComplexMask = 0xBF009400,
+ NEON_FCMLA_byelement = NEONByIndexedElementFixed | 0x20001000
+};
+
+// NEON register copy.
+enum NEONCopyOp : uint32_t {
+ NEONCopyFixed = 0x0E000400,
+ NEONCopyFMask = 0x9FE08400,
+ NEONCopyMask = 0x3FE08400,
+ NEONCopyInsElementMask = NEONCopyMask | 0x40000000,
+ NEONCopyInsGeneralMask = NEONCopyMask | 0x40007800,
+ NEONCopyDupElementMask = NEONCopyMask | 0x20007800,
+ NEONCopyDupGeneralMask = NEONCopyDupElementMask,
+ NEONCopyUmovMask = NEONCopyMask | 0x20007800,
+ NEONCopySmovMask = NEONCopyMask | 0x20007800,
+ NEON_INS_ELEMENT = NEONCopyFixed | 0x60000000,
+ NEON_INS_GENERAL = NEONCopyFixed | 0x40001800,
+ NEON_DUP_ELEMENT = NEONCopyFixed | 0x00000000,
+ NEON_DUP_GENERAL = NEONCopyFixed | 0x00000800,
+ NEON_SMOV = NEONCopyFixed | 0x00002800,
+ NEON_UMOV = NEONCopyFixed | 0x00003800
+};
+
+// NEON extract.
+enum NEONExtractOp : uint32_t {
+ NEONExtractFixed = 0x2E000000,
+ NEONExtractFMask = 0xBF208400,
+ NEONExtractMask = 0xBFE08400,
+ NEON_EXT = NEONExtractFixed | 0x00000000
+};
+
+enum NEONLoadStoreMultiOp : uint32_t {
+ NEONLoadStoreMultiL = 0x00400000,
+ NEONLoadStoreMulti1_1v = 0x00007000,
+ NEONLoadStoreMulti1_2v = 0x0000A000,
+ NEONLoadStoreMulti1_3v = 0x00006000,
+ NEONLoadStoreMulti1_4v = 0x00002000,
+ NEONLoadStoreMulti2 = 0x00008000,
+ NEONLoadStoreMulti3 = 0x00004000,
+ NEONLoadStoreMulti4 = 0x00000000
+};
+
+// NEON load/store multiple structures.
+enum NEONLoadStoreMultiStructOp : uint32_t {
+ NEONLoadStoreMultiStructFixed = 0x0C000000,
+ NEONLoadStoreMultiStructFMask = 0xBFBF0000,
+ NEONLoadStoreMultiStructMask = 0xBFFFF000,
+ NEONLoadStoreMultiStructStore = NEONLoadStoreMultiStructFixed,
+ NEONLoadStoreMultiStructLoad = NEONLoadStoreMultiStructFixed |
+ NEONLoadStoreMultiL,
+ NEON_LD1_1v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_1v,
+ NEON_LD1_2v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_2v,
+ NEON_LD1_3v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_3v,
+ NEON_LD1_4v = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti1_4v,
+ NEON_LD2 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti2,
+ NEON_LD3 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti3,
+ NEON_LD4 = NEONLoadStoreMultiStructLoad | NEONLoadStoreMulti4,
+ NEON_ST1_1v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_1v,
+ NEON_ST1_2v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_2v,
+ NEON_ST1_3v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_3v,
+ NEON_ST1_4v = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti1_4v,
+ NEON_ST2 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti2,
+ NEON_ST3 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti3,
+ NEON_ST4 = NEONLoadStoreMultiStructStore | NEONLoadStoreMulti4
+};
+
+// NEON load/store multiple structures with post-index addressing.
+enum NEONLoadStoreMultiStructPostIndexOp : uint32_t {
+ NEONLoadStoreMultiStructPostIndexFixed = 0x0C800000,
+ NEONLoadStoreMultiStructPostIndexFMask = 0xBFA00000,
+ NEONLoadStoreMultiStructPostIndexMask = 0xBFE0F000,
+ NEONLoadStoreMultiStructPostIndex = 0x00800000,
+ NEON_LD1_1v_post = NEON_LD1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_2v_post = NEON_LD1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_3v_post = NEON_LD1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD1_4v_post = NEON_LD1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD2_post = NEON_LD2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD3_post = NEON_LD3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_LD4_post = NEON_LD4 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_1v_post = NEON_ST1_1v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_2v_post = NEON_ST1_2v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_3v_post = NEON_ST1_3v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST1_4v_post = NEON_ST1_4v | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST2_post = NEON_ST2 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST3_post = NEON_ST3 | NEONLoadStoreMultiStructPostIndex,
+ NEON_ST4_post = NEON_ST4 | NEONLoadStoreMultiStructPostIndex
+};
+
+enum NEONLoadStoreSingleOp : uint32_t {
+ NEONLoadStoreSingle1 = 0x00000000,
+ NEONLoadStoreSingle2 = 0x00200000,
+ NEONLoadStoreSingle3 = 0x00002000,
+ NEONLoadStoreSingle4 = 0x00202000,
+ NEONLoadStoreSingleL = 0x00400000,
+ NEONLoadStoreSingle_b = 0x00000000,
+ NEONLoadStoreSingle_h = 0x00004000,
+ NEONLoadStoreSingle_s = 0x00008000,
+ NEONLoadStoreSingle_d = 0x00008400,
+ NEONLoadStoreSingleAllLanes = 0x0000C000,
+ NEONLoadStoreSingleLenMask = 0x00202000
+};
+
+// NEON load/store single structure.
+enum NEONLoadStoreSingleStructOp : uint32_t {
+ NEONLoadStoreSingleStructFixed = 0x0D000000,
+ NEONLoadStoreSingleStructFMask = 0xBF9F0000,
+ NEONLoadStoreSingleStructMask = 0xBFFFE000,
+ NEONLoadStoreSingleStructStore = NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructLoad = NEONLoadStoreSingleStructFixed |
+ NEONLoadStoreSingleL,
+ NEONLoadStoreSingleStructLoad1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructLoad4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructLoad,
+ NEONLoadStoreSingleStructStore1 = NEONLoadStoreSingle1 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore2 = NEONLoadStoreSingle2 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore3 = NEONLoadStoreSingle3 |
+ NEONLoadStoreSingleStructFixed,
+ NEONLoadStoreSingleStructStore4 = NEONLoadStoreSingle4 |
+ NEONLoadStoreSingleStructFixed,
+ NEON_LD1_b = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_b,
+ NEON_LD1_h = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_h,
+ NEON_LD1_s = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_s,
+ NEON_LD1_d = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingle_d,
+ NEON_LD1R = NEONLoadStoreSingleStructLoad1 | NEONLoadStoreSingleAllLanes,
+ NEON_ST1_b = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_b,
+ NEON_ST1_h = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_h,
+ NEON_ST1_s = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_s,
+ NEON_ST1_d = NEONLoadStoreSingleStructStore1 | NEONLoadStoreSingle_d,
+
+ NEON_LD2_b = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_b,
+ NEON_LD2_h = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_h,
+ NEON_LD2_s = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_s,
+ NEON_LD2_d = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingle_d,
+ NEON_LD2R = NEONLoadStoreSingleStructLoad2 | NEONLoadStoreSingleAllLanes,
+ NEON_ST2_b = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_b,
+ NEON_ST2_h = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_h,
+ NEON_ST2_s = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_s,
+ NEON_ST2_d = NEONLoadStoreSingleStructStore2 | NEONLoadStoreSingle_d,
+
+ NEON_LD3_b = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_b,
+ NEON_LD3_h = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_h,
+ NEON_LD3_s = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_s,
+ NEON_LD3_d = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingle_d,
+ NEON_LD3R = NEONLoadStoreSingleStructLoad3 | NEONLoadStoreSingleAllLanes,
+ NEON_ST3_b = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_b,
+ NEON_ST3_h = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_h,
+ NEON_ST3_s = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_s,
+ NEON_ST3_d = NEONLoadStoreSingleStructStore3 | NEONLoadStoreSingle_d,
+
+ NEON_LD4_b = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_b,
+ NEON_LD4_h = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_h,
+ NEON_LD4_s = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_s,
+ NEON_LD4_d = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingle_d,
+ NEON_LD4R = NEONLoadStoreSingleStructLoad4 | NEONLoadStoreSingleAllLanes,
+ NEON_ST4_b = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_b,
+ NEON_ST4_h = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_h,
+ NEON_ST4_s = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_s,
+ NEON_ST4_d = NEONLoadStoreSingleStructStore4 | NEONLoadStoreSingle_d
+};
+
+// NEON load/store single structure with post-index addressing.
+enum NEONLoadStoreSingleStructPostIndexOp : uint32_t {
+ NEONLoadStoreSingleStructPostIndexFixed = 0x0D800000,
+ NEONLoadStoreSingleStructPostIndexFMask = 0xBF800000,
+ NEONLoadStoreSingleStructPostIndexMask = 0xBFE0E000,
+ NEONLoadStoreSingleStructPostIndex = 0x00800000,
+ NEON_LD1_b_post = NEON_LD1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_h_post = NEON_LD1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_s_post = NEON_LD1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1_d_post = NEON_LD1_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD1R_post = NEON_LD1R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_b_post = NEON_ST1_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_h_post = NEON_ST1_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_s_post = NEON_ST1_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST1_d_post = NEON_ST1_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD2_b_post = NEON_LD2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_h_post = NEON_LD2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_s_post = NEON_LD2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2_d_post = NEON_LD2_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD2R_post = NEON_LD2R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_b_post = NEON_ST2_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_h_post = NEON_ST2_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_s_post = NEON_ST2_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST2_d_post = NEON_ST2_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD3_b_post = NEON_LD3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_h_post = NEON_LD3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_s_post = NEON_LD3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3_d_post = NEON_LD3_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD3R_post = NEON_LD3R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_b_post = NEON_ST3_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_h_post = NEON_ST3_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_s_post = NEON_ST3_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST3_d_post = NEON_ST3_d | NEONLoadStoreSingleStructPostIndex,
+
+ NEON_LD4_b_post = NEON_LD4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_h_post = NEON_LD4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_s_post = NEON_LD4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4_d_post = NEON_LD4_d | NEONLoadStoreSingleStructPostIndex,
+ NEON_LD4R_post = NEON_LD4R | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_b_post = NEON_ST4_b | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_h_post = NEON_ST4_h | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_s_post = NEON_ST4_s | NEONLoadStoreSingleStructPostIndex,
+ NEON_ST4_d_post = NEON_ST4_d | NEONLoadStoreSingleStructPostIndex
+};
+
+// NEON modified immediate.
+enum NEONModifiedImmediateOp : uint32_t {
+ NEONModifiedImmediateFixed = 0x0F000400,
+ NEONModifiedImmediateFMask = 0x9FF80400,
+ NEONModifiedImmediateOpBit = 0x20000000,
+ NEONModifiedImmediate_FMOV = NEONModifiedImmediateFixed | 0x00000800,
+ NEONModifiedImmediate_MOVI = NEONModifiedImmediateFixed | 0x00000000,
+ NEONModifiedImmediate_MVNI = NEONModifiedImmediateFixed | 0x20000000,
+ NEONModifiedImmediate_ORR = NEONModifiedImmediateFixed | 0x00001000,
+ NEONModifiedImmediate_BIC = NEONModifiedImmediateFixed | 0x20001000
+};
+
+// NEON shift immediate.
+enum NEONShiftImmediateOp : uint32_t {
+ NEONShiftImmediateFixed = 0x0F000400,
+ NEONShiftImmediateFMask = 0x9F800400,
+ NEONShiftImmediateMask = 0xBF80FC00,
+ NEONShiftImmediateUBit = 0x20000000,
+ NEON_SHL = NEONShiftImmediateFixed | 0x00005000,
+ NEON_SSHLL = NEONShiftImmediateFixed | 0x0000A000,
+ NEON_USHLL = NEONShiftImmediateFixed | 0x2000A000,
+ NEON_SLI = NEONShiftImmediateFixed | 0x20005000,
+ NEON_SRI = NEONShiftImmediateFixed | 0x20004000,
+ NEON_SHRN = NEONShiftImmediateFixed | 0x00008000,
+ NEON_RSHRN = NEONShiftImmediateFixed | 0x00008800,
+ NEON_UQSHRN = NEONShiftImmediateFixed | 0x20009000,
+ NEON_UQRSHRN = NEONShiftImmediateFixed | 0x20009800,
+ NEON_SQSHRN = NEONShiftImmediateFixed | 0x00009000,
+ NEON_SQRSHRN = NEONShiftImmediateFixed | 0x00009800,
+ NEON_SQSHRUN = NEONShiftImmediateFixed | 0x20008000,
+ NEON_SQRSHRUN = NEONShiftImmediateFixed | 0x20008800,
+ NEON_SSHR = NEONShiftImmediateFixed | 0x00000000,
+ NEON_SRSHR = NEONShiftImmediateFixed | 0x00002000,
+ NEON_USHR = NEONShiftImmediateFixed | 0x20000000,
+ NEON_URSHR = NEONShiftImmediateFixed | 0x20002000,
+ NEON_SSRA = NEONShiftImmediateFixed | 0x00001000,
+ NEON_SRSRA = NEONShiftImmediateFixed | 0x00003000,
+ NEON_USRA = NEONShiftImmediateFixed | 0x20001000,
+ NEON_URSRA = NEONShiftImmediateFixed | 0x20003000,
+ NEON_SQSHLU = NEONShiftImmediateFixed | 0x20006000,
+ NEON_SCVTF_imm = NEONShiftImmediateFixed | 0x0000E000,
+ NEON_UCVTF_imm = NEONShiftImmediateFixed | 0x2000E000,
+ NEON_FCVTZS_imm = NEONShiftImmediateFixed | 0x0000F800,
+ NEON_FCVTZU_imm = NEONShiftImmediateFixed | 0x2000F800,
+ NEON_SQSHL_imm = NEONShiftImmediateFixed | 0x00007000,
+ NEON_UQSHL_imm = NEONShiftImmediateFixed | 0x20007000
+};
+
+// NEON table.
+enum NEONTableOp : uint32_t {
+ NEONTableFixed = 0x0E000000,
+ NEONTableFMask = 0xBF208C00,
+ NEONTableExt = 0x00001000,
+ NEONTableMask = 0xBF20FC00,
+ NEON_TBL_1v = NEONTableFixed | 0x00000000,
+ NEON_TBL_2v = NEONTableFixed | 0x00002000,
+ NEON_TBL_3v = NEONTableFixed | 0x00004000,
+ NEON_TBL_4v = NEONTableFixed | 0x00006000,
+ NEON_TBX_1v = NEON_TBL_1v | NEONTableExt,
+ NEON_TBX_2v = NEON_TBL_2v | NEONTableExt,
+ NEON_TBX_3v = NEON_TBL_3v | NEONTableExt,
+ NEON_TBX_4v = NEON_TBL_4v | NEONTableExt
+};
+
+// NEON perm.
+enum NEONPermOp : uint32_t {
+ NEONPermFixed = 0x0E000800,
+ NEONPermFMask = 0xBF208C00,
+ NEONPermMask = 0x3F20FC00,
+ NEON_UZP1 = NEONPermFixed | 0x00001000,
+ NEON_TRN1 = NEONPermFixed | 0x00002000,
+ NEON_ZIP1 = NEONPermFixed | 0x00003000,
+ NEON_UZP2 = NEONPermFixed | 0x00005000,
+ NEON_TRN2 = NEONPermFixed | 0x00006000,
+ NEON_ZIP2 = NEONPermFixed | 0x00007000
+};
+
+// NEON scalar instructions with two register operands.
+enum NEONScalar2RegMiscOp : uint32_t {
+ NEONScalar2RegMiscFixed = 0x5E200800,
+ NEONScalar2RegMiscFMask = 0xDF3E0C00,
+ NEONScalar2RegMiscMask = NEON_Q | NEONScalar | NEON2RegMiscMask,
+ NEON_CMGT_zero_scalar = NEON_Q | NEONScalar | NEON_CMGT_zero,
+ NEON_CMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_CMEQ_zero,
+ NEON_CMLT_zero_scalar = NEON_Q | NEONScalar | NEON_CMLT_zero,
+ NEON_CMGE_zero_scalar = NEON_Q | NEONScalar | NEON_CMGE_zero,
+ NEON_CMLE_zero_scalar = NEON_Q | NEONScalar | NEON_CMLE_zero,
+ NEON_ABS_scalar = NEON_Q | NEONScalar | NEON_ABS,
+ NEON_SQABS_scalar = NEON_Q | NEONScalar | NEON_SQABS,
+ NEON_NEG_scalar = NEON_Q | NEONScalar | NEON_NEG,
+ NEON_SQNEG_scalar = NEON_Q | NEONScalar | NEON_SQNEG,
+ NEON_SQXTN_scalar = NEON_Q | NEONScalar | NEON_SQXTN,
+ NEON_UQXTN_scalar = NEON_Q | NEONScalar | NEON_UQXTN,
+ NEON_SQXTUN_scalar = NEON_Q | NEONScalar | NEON_SQXTUN,
+ NEON_SUQADD_scalar = NEON_Q | NEONScalar | NEON_SUQADD,
+ NEON_USQADD_scalar = NEON_Q | NEONScalar | NEON_USQADD,
+
+ NEONScalar2RegMiscOpcode = NEON2RegMiscOpcode,
+ NEON_NEG_scalar_opcode = NEON_NEG_scalar & NEONScalar2RegMiscOpcode,
+
+ NEONScalar2RegMiscFPMask = NEONScalar2RegMiscMask | 0x00800000,
+ NEON_FRSQRTE_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE,
+ NEON_FRECPE_scalar = NEON_Q | NEONScalar | NEON_FRECPE,
+ NEON_SCVTF_scalar = NEON_Q | NEONScalar | NEON_SCVTF,
+ NEON_UCVTF_scalar = NEON_Q | NEONScalar | NEON_UCVTF,
+ NEON_FCMGT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_zero,
+ NEON_FCMEQ_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_zero,
+ NEON_FCMLT_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_zero,
+ NEON_FCMGE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_zero,
+ NEON_FCMLE_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_zero,
+ NEON_FRECPX_scalar = NEONScalar2RegMiscFixed | 0x0081F000,
+ NEON_FCVTNS_scalar = NEON_Q | NEONScalar | NEON_FCVTNS,
+ NEON_FCVTNU_scalar = NEON_Q | NEONScalar | NEON_FCVTNU,
+ NEON_FCVTPS_scalar = NEON_Q | NEONScalar | NEON_FCVTPS,
+ NEON_FCVTPU_scalar = NEON_Q | NEONScalar | NEON_FCVTPU,
+ NEON_FCVTMS_scalar = NEON_Q | NEONScalar | NEON_FCVTMS,
+ NEON_FCVTMU_scalar = NEON_Q | NEONScalar | NEON_FCVTMU,
+ NEON_FCVTZS_scalar = NEON_Q | NEONScalar | NEON_FCVTZS,
+ NEON_FCVTZU_scalar = NEON_Q | NEONScalar | NEON_FCVTZU,
+ NEON_FCVTAS_scalar = NEON_Q | NEONScalar | NEON_FCVTAS,
+ NEON_FCVTAU_scalar = NEON_Q | NEONScalar | NEON_FCVTAU,
+ NEON_FCVTXN_scalar = NEON_Q | NEONScalar | NEON_FCVTXN
+};
+
+// NEON instructions with two register operands (FP16).
+enum NEONScalar2RegMiscFP16Op : uint32_t {
+ NEONScalar2RegMiscFP16Fixed = 0x5E780800,
+ NEONScalar2RegMiscFP16FMask = 0xDF7E0C00,
+ NEONScalar2RegMiscFP16Mask = 0xFFFFFC00,
+ NEON_FCVTNS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNS_H,
+ NEON_FCVTMS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMS_H,
+ NEON_FCVTAS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAS_H,
+ NEON_SCVTF_H_scalar = NEON_Q | NEONScalar | NEON_SCVTF_H,
+ NEON_FCMGT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGT_H_zero,
+ NEON_FCMEQ_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMEQ_H_zero,
+ NEON_FCMLT_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLT_H_zero,
+ NEON_FCVTPS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPS_H,
+ NEON_FCVTZS_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_H,
+ NEON_FRECPE_H_scalar = NEON_Q | NEONScalar | NEON_FRECPE_H,
+ NEON_FRECPX_H_scalar = NEONScalar2RegMiscFP16Fixed | 0x0081F000,
+ NEON_FCVTNU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTNU_H,
+ NEON_FCVTMU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTMU_H,
+ NEON_FCVTAU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTAU_H,
+ NEON_UCVTF_H_scalar = NEON_Q | NEONScalar | NEON_UCVTF_H,
+ NEON_FCMGE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMGE_H_zero,
+ NEON_FCMLE_H_zero_scalar = NEON_Q | NEONScalar | NEON_FCMLE_H_zero,
+ NEON_FCVTPU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTPU_H,
+ NEON_FCVTZU_H_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_H,
+ NEON_FRSQRTE_H_scalar = NEON_Q | NEONScalar | NEON_FRSQRTE_H
+};
+
+// NEON scalar instructions with three same-type operands.
+enum NEONScalar3SameOp : uint32_t {
+ NEONScalar3SameFixed = 0x5E200400,
+ NEONScalar3SameFMask = 0xDF200400,
+ NEONScalar3SameMask = 0xFF20FC00,
+ NEON_ADD_scalar = NEON_Q | NEONScalar | NEON_ADD,
+ NEON_CMEQ_scalar = NEON_Q | NEONScalar | NEON_CMEQ,
+ NEON_CMGE_scalar = NEON_Q | NEONScalar | NEON_CMGE,
+ NEON_CMGT_scalar = NEON_Q | NEONScalar | NEON_CMGT,
+ NEON_CMHI_scalar = NEON_Q | NEONScalar | NEON_CMHI,
+ NEON_CMHS_scalar = NEON_Q | NEONScalar | NEON_CMHS,
+ NEON_CMTST_scalar = NEON_Q | NEONScalar | NEON_CMTST,
+ NEON_SUB_scalar = NEON_Q | NEONScalar | NEON_SUB,
+ NEON_UQADD_scalar = NEON_Q | NEONScalar | NEON_UQADD,
+ NEON_SQADD_scalar = NEON_Q | NEONScalar | NEON_SQADD,
+ NEON_UQSUB_scalar = NEON_Q | NEONScalar | NEON_UQSUB,
+ NEON_SQSUB_scalar = NEON_Q | NEONScalar | NEON_SQSUB,
+ NEON_USHL_scalar = NEON_Q | NEONScalar | NEON_USHL,
+ NEON_SSHL_scalar = NEON_Q | NEONScalar | NEON_SSHL,
+ NEON_UQSHL_scalar = NEON_Q | NEONScalar | NEON_UQSHL,
+ NEON_SQSHL_scalar = NEON_Q | NEONScalar | NEON_SQSHL,
+ NEON_URSHL_scalar = NEON_Q | NEONScalar | NEON_URSHL,
+ NEON_SRSHL_scalar = NEON_Q | NEONScalar | NEON_SRSHL,
+ NEON_UQRSHL_scalar = NEON_Q | NEONScalar | NEON_UQRSHL,
+ NEON_SQRSHL_scalar = NEON_Q | NEONScalar | NEON_SQRSHL,
+ NEON_SQDMULH_scalar = NEON_Q | NEONScalar | NEON_SQDMULH,
+ NEON_SQRDMULH_scalar = NEON_Q | NEONScalar | NEON_SQRDMULH,
+
+ // NEON floating point scalar instructions with three same-type operands.
+ NEONScalar3SameFPFixed = NEONScalar3SameFixed | 0x0000C000,
+ NEONScalar3SameFPFMask = NEONScalar3SameFMask | 0x0000C000,
+ NEONScalar3SameFPMask = NEONScalar3SameMask | 0x00800000,
+ NEON_FACGE_scalar = NEON_Q | NEONScalar | NEON_FACGE,
+ NEON_FACGT_scalar = NEON_Q | NEONScalar | NEON_FACGT,
+ NEON_FCMEQ_scalar = NEON_Q | NEONScalar | NEON_FCMEQ,
+ NEON_FCMGE_scalar = NEON_Q | NEONScalar | NEON_FCMGE,
+ NEON_FCMGT_scalar = NEON_Q | NEONScalar | NEON_FCMGT,
+ NEON_FMULX_scalar = NEON_Q | NEONScalar | NEON_FMULX,
+ NEON_FRECPS_scalar = NEON_Q | NEONScalar | NEON_FRECPS,
+ NEON_FRSQRTS_scalar = NEON_Q | NEONScalar | NEON_FRSQRTS,
+ NEON_FABD_scalar = NEON_Q | NEONScalar | NEON_FABD
+};
+
+// NEON scalar instructions with three different-type operands.
+enum NEONScalar3DiffOp : uint32_t {
+ NEONScalar3DiffFixed = 0x5E200000,
+ NEONScalar3DiffFMask = 0xDF200C00,
+ NEONScalar3DiffMask = NEON_Q | NEONScalar | NEON3DifferentMask,
+ NEON_SQDMLAL_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL,
+ NEON_SQDMLSL_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL,
+ NEON_SQDMULL_scalar = NEON_Q | NEONScalar | NEON_SQDMULL
+};
+
+// NEON scalar instructions with indexed element operand.
+enum NEONScalarByIndexedElementOp : uint32_t {
+ NEONScalarByIndexedElementFixed = 0x5F000000,
+ NEONScalarByIndexedElementFMask = 0xDF000400,
+ NEONScalarByIndexedElementMask = 0xFF00F400,
+ NEON_SQDMLAL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLAL_byelement,
+ NEON_SQDMLSL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMLSL_byelement,
+ NEON_SQDMULL_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULL_byelement,
+ NEON_SQDMULH_byelement_scalar = NEON_Q | NEONScalar | NEON_SQDMULH_byelement,
+ NEON_SQRDMULH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMULH_byelement,
+ NEON_SQRDMLAH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMLAH_byelement,
+ NEON_SQRDMLSH_byelement_scalar
+ = NEON_Q | NEONScalar | NEON_SQRDMLSH_byelement,
+ NEON_FMLA_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_H_byelement,
+ NEON_FMLS_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_H_byelement,
+ NEON_FMUL_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_H_byelement,
+ NEON_FMULX_H_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_H_byelement,
+
+ // Floating point instructions.
+ NEONScalarByIndexedElementFPFixed
+ = NEONScalarByIndexedElementFixed | 0x00800000,
+ NEONScalarByIndexedElementFPMask
+ = NEONScalarByIndexedElementMask | 0x00800000,
+ NEON_FMLA_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLA_byelement,
+ NEON_FMLS_byelement_scalar = NEON_Q | NEONScalar | NEON_FMLS_byelement,
+ NEON_FMUL_byelement_scalar = NEON_Q | NEONScalar | NEON_FMUL_byelement,
+ NEON_FMULX_byelement_scalar = NEON_Q | NEONScalar | NEON_FMULX_byelement
+};
+
+// NEON scalar register copy.
+enum NEONScalarCopyOp : uint32_t {
+ NEONScalarCopyFixed = 0x5E000400,
+ NEONScalarCopyFMask = 0xDFE08400,
+ NEONScalarCopyMask = 0xFFE0FC00,
+ NEON_DUP_ELEMENT_scalar = NEON_Q | NEONScalar | NEON_DUP_ELEMENT
+};
+
+// NEON scalar pairwise instructions.
+enum NEONScalarPairwiseOp : uint32_t {
+ NEONScalarPairwiseFixed = 0x5E300800,
+ NEONScalarPairwiseFMask = 0xDF3E0C00,
+ NEONScalarPairwiseMask = 0xFFB1F800,
+ NEON_ADDP_scalar = NEONScalarPairwiseFixed | 0x0081B000,
+ NEON_FMAXNMP_h_scalar = NEONScalarPairwiseFixed | 0x0000C000,
+ NEON_FADDP_h_scalar = NEONScalarPairwiseFixed | 0x0000D000,
+ NEON_FMAXP_h_scalar = NEONScalarPairwiseFixed | 0x0000F000,
+ NEON_FMINNMP_h_scalar = NEONScalarPairwiseFixed | 0x0080C000,
+ NEON_FMINP_h_scalar = NEONScalarPairwiseFixed | 0x0080F000,
+ NEON_FMAXNMP_scalar = NEONScalarPairwiseFixed | 0x2000C000,
+ NEON_FMINNMP_scalar = NEONScalarPairwiseFixed | 0x2080C000,
+ NEON_FADDP_scalar = NEONScalarPairwiseFixed | 0x2000D000,
+ NEON_FMAXP_scalar = NEONScalarPairwiseFixed | 0x2000F000,
+ NEON_FMINP_scalar = NEONScalarPairwiseFixed | 0x2080F000
+};
+
+// NEON scalar shift immediate.
+enum NEONScalarShiftImmediateOp : uint32_t {
+ NEONScalarShiftImmediateFixed = 0x5F000400,
+ NEONScalarShiftImmediateFMask = 0xDF800400,
+ NEONScalarShiftImmediateMask = 0xFF80FC00,
+ NEON_SHL_scalar = NEON_Q | NEONScalar | NEON_SHL,
+ NEON_SLI_scalar = NEON_Q | NEONScalar | NEON_SLI,
+ NEON_SRI_scalar = NEON_Q | NEONScalar | NEON_SRI,
+ NEON_SSHR_scalar = NEON_Q | NEONScalar | NEON_SSHR,
+ NEON_USHR_scalar = NEON_Q | NEONScalar | NEON_USHR,
+ NEON_SRSHR_scalar = NEON_Q | NEONScalar | NEON_SRSHR,
+ NEON_URSHR_scalar = NEON_Q | NEONScalar | NEON_URSHR,
+ NEON_SSRA_scalar = NEON_Q | NEONScalar | NEON_SSRA,
+ NEON_USRA_scalar = NEON_Q | NEONScalar | NEON_USRA,
+ NEON_SRSRA_scalar = NEON_Q | NEONScalar | NEON_SRSRA,
+ NEON_URSRA_scalar = NEON_Q | NEONScalar | NEON_URSRA,
+ NEON_UQSHRN_scalar = NEON_Q | NEONScalar | NEON_UQSHRN,
+ NEON_UQRSHRN_scalar = NEON_Q | NEONScalar | NEON_UQRSHRN,
+ NEON_SQSHRN_scalar = NEON_Q | NEONScalar | NEON_SQSHRN,
+ NEON_SQRSHRN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRN,
+ NEON_SQSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQSHRUN,
+ NEON_SQRSHRUN_scalar = NEON_Q | NEONScalar | NEON_SQRSHRUN,
+ NEON_SQSHLU_scalar = NEON_Q | NEONScalar | NEON_SQSHLU,
+ NEON_SQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_SQSHL_imm,
+ NEON_UQSHL_imm_scalar = NEON_Q | NEONScalar | NEON_UQSHL_imm,
+ NEON_SCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_SCVTF_imm,
+ NEON_UCVTF_imm_scalar = NEON_Q | NEONScalar | NEON_UCVTF_imm,
+ NEON_FCVTZS_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZS_imm,
+ NEON_FCVTZU_imm_scalar = NEON_Q | NEONScalar | NEON_FCVTZU_imm
+};
+
+enum ReservedOp : uint32_t {
+ ReservedFixed = 0x00000000,
+ ReservedFMask = 0x1E000000,
+ ReservedMask = 0xFFFF0000,
+
+ UDF = ReservedFixed | 0x00000000
+};
+
+// Unimplemented and unallocated instructions. These are defined to make
+// fixed-bit assertions easier.
+enum UnimplementedOp : uint32_t {
+ UnimplementedFixed = 0x00000000,
+ UnimplementedFMask = 0x00000000
+};
+
+enum UnallocatedOp : uint32_t {
+ UnallocatedFixed = 0x00000000,
+ UnallocatedFMask = 0x00000000
+};
+
+// Instruction bit pattern for an undefined instruction that will trigger a
+// SIGILL at runtime.
+//
+// There are a couple of strategies we can use here. No unencoded
+// instruction in the instruction set is guaranteed to remain unencoded.
+// However, there are some currently (as of 2018) unencoded instructions
+// that are good candidates.
+//
+// Ideally, unencoded instructions should be non-destructive to the register
+// state, and should be unencoded at all exception levels.
+//
+// At the trap the pc will hold the address of the offending instruction.
+//
+// Some candidates for unencoded instructions:
+//
+// 0xd4a00000 (essentially dcps0, a good one since it is nonsensical and may
+// remain unencoded in the future for that reason)
+// 0x33000000 (bfm variant)
+// 0xd67f0000 (br variant)
+// 0x5ac00c00 (rbit variant)
+//
+// The pattern below encodes "dcps0"; it also carries a 16-bit immediate
+// payload if needed. (An illustrative emission sketch follows the constant.)
+static constexpr uint32_t UNDEFINED_INST_PATTERN = 0xd4a00000;
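+//
+// Illustrative sketch only (not part of the upstream header): a JIT might
+// plant this pattern directly in its code buffer to guarantee a SIGILL if
+// control ever reaches that point. The EmitUndefined() helper name below is
+// hypothetical.
+//
+//   static void EmitUndefined(uint32_t* code_ptr) {
+//     // Write the 32-bit pattern; executing it raises SIGILL and leaves the
+//     // pc pointing at this instruction.
+//     *code_ptr = UNDEFINED_INST_PATTERN;
+//   }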
+
+} // namespace vixl
+
+#endif // VIXL_A64_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp
new file mode 100644
index 0000000000..f31c22fbf5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-Features-vixl.cpp
@@ -0,0 +1,231 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+#include "jit/arm64/vixl/Cpu-Features-vixl.h"
+
+#include <ostream>
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#define VIXL_USE_AARCH64_CPU_HELPERS
+
+namespace vixl {
+
+static uint64_t MakeFeatureMask(CPUFeatures::Feature feature) {
+ if (feature == CPUFeatures::kNone) {
+ return 0;
+ } else {
+ // Check that the shift is well-defined, and that the feature is valid.
+ VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures <=
+ (sizeof(uint64_t) * 8));
+ VIXL_ASSERT(feature < CPUFeatures::kNumberOfFeatures);
+ return UINT64_C(1) << feature;
+ }
+}
+
+CPUFeatures::CPUFeatures(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3)
+ : features_(0) {
+ Combine(feature0, feature1, feature2, feature3);
+}
+
+CPUFeatures CPUFeatures::All() {
+ CPUFeatures all;
+ // Check that the shift is well-defined.
+ VIXL_STATIC_ASSERT(CPUFeatures::kNumberOfFeatures < (sizeof(uint64_t) * 8));
+ all.features_ = (UINT64_C(1) << kNumberOfFeatures) - 1;
+ return all;
+}
+
+CPUFeatures CPUFeatures::InferFromIDRegisters() {
+ // This function assumes that kIDRegisterEmulation is available.
+ CPUFeatures features(CPUFeatures::kIDRegisterEmulation);
+#ifdef VIXL_USE_AARCH64_CPU_HELPERS
+ // Note that the Linux kernel filters these values during emulation, so the
+ // results may not exactly match the expected hardware support.
+ features.Combine(CPU::InferCPUFeaturesFromIDRegisters());
+#endif
+ return features;
+}
+
+CPUFeatures CPUFeatures::InferFromOS(QueryIDRegistersOption option) {
+#ifdef VIXL_USE_AARCH64_CPU_HELPERS
+ return CPU::InferCPUFeaturesFromOS(option);
+#else
+ USE(option);
+ return CPUFeatures();
+#endif
+}
+
+void CPUFeatures::Combine(const CPUFeatures& other) {
+ features_ |= other.features_;
+}
+
+void CPUFeatures::Combine(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) {
+ features_ |= MakeFeatureMask(feature0);
+ features_ |= MakeFeatureMask(feature1);
+ features_ |= MakeFeatureMask(feature2);
+ features_ |= MakeFeatureMask(feature3);
+}
+
+void CPUFeatures::Remove(const CPUFeatures& other) {
+ features_ &= ~other.features_;
+}
+
+void CPUFeatures::Remove(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) {
+ features_ &= ~MakeFeatureMask(feature0);
+ features_ &= ~MakeFeatureMask(feature1);
+ features_ &= ~MakeFeatureMask(feature2);
+ features_ &= ~MakeFeatureMask(feature3);
+}
+
+CPUFeatures CPUFeatures::With(const CPUFeatures& other) const {
+ CPUFeatures f(*this);
+ f.Combine(other);
+ return f;
+}
+
+CPUFeatures CPUFeatures::With(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ CPUFeatures f(*this);
+ f.Combine(feature0, feature1, feature2, feature3);
+ return f;
+}
+
+CPUFeatures CPUFeatures::Without(const CPUFeatures& other) const {
+ CPUFeatures f(*this);
+ f.Remove(other);
+ return f;
+}
+
+CPUFeatures CPUFeatures::Without(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ CPUFeatures f(*this);
+ f.Remove(feature0, feature1, feature2, feature3);
+ return f;
+}
+
+bool CPUFeatures::Has(const CPUFeatures& other) const {
+ return (features_ & other.features_) == other.features_;
+}
+
+bool CPUFeatures::Has(Feature feature0,
+ Feature feature1,
+ Feature feature2,
+ Feature feature3) const {
+ uint64_t mask = MakeFeatureMask(feature0) | MakeFeatureMask(feature1) |
+ MakeFeatureMask(feature2) | MakeFeatureMask(feature3);
+ return (features_ & mask) == mask;
+}
+
+size_t CPUFeatures::Count() const { return CountSetBits(features_); }
+
+std::ostream& operator<<(std::ostream& os, CPUFeatures::Feature feature) {
+ // clang-format off
+ switch (feature) {
+#define VIXL_FORMAT_FEATURE(SYMBOL, NAME, CPUINFO) \
+ case CPUFeatures::SYMBOL: \
+ return os << NAME;
+VIXL_CPU_FEATURE_LIST(VIXL_FORMAT_FEATURE)
+#undef VIXL_FORMAT_FEATURE
+ case CPUFeatures::kNone:
+ return os << "none";
+ case CPUFeatures::kNumberOfFeatures:
+ VIXL_UNREACHABLE();
+ }
+ // clang-format on
+ VIXL_UNREACHABLE();
+ return os;
+}
+
+CPUFeatures::const_iterator CPUFeatures::begin() const {
+ if (features_ == 0) return const_iterator(this, kNone);
+
+ int feature_number = CountTrailingZeros(features_);
+ vixl::CPUFeatures::Feature feature =
+ static_cast<CPUFeatures::Feature>(feature_number);
+ return const_iterator(this, feature);
+}
+
+CPUFeatures::const_iterator CPUFeatures::end() const {
+ return const_iterator(this, kNone);
+}
+
+std::ostream& operator<<(std::ostream& os, const CPUFeatures& features) {
+ CPUFeatures::const_iterator it = features.begin();
+ while (it != features.end()) {
+ os << *it;
+ ++it;
+ if (it != features.end()) os << ", ";
+ }
+ return os;
+}
+
+bool CPUFeaturesConstIterator::operator==(
+ const CPUFeaturesConstIterator& other) const {
+ VIXL_ASSERT(IsValid());
+ return (cpu_features_ == other.cpu_features_) && (feature_ == other.feature_);
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++() { // Prefix
+ VIXL_ASSERT(IsValid());
+ do {
+ // Find the next feature. The order is unspecified.
+ feature_ = static_cast<CPUFeatures::Feature>(feature_ + 1);
+ if (feature_ == CPUFeatures::kNumberOfFeatures) {
+ feature_ = CPUFeatures::kNone;
+ VIXL_STATIC_ASSERT(CPUFeatures::kNone == -1);
+ }
+ VIXL_ASSERT(CPUFeatures::kNone <= feature_);
+ VIXL_ASSERT(feature_ < CPUFeatures::kNumberOfFeatures);
+ // cpu_features_->Has(kNone) is always true, so this will terminate even if
+ // the features list is empty.
+ } while (!cpu_features_->Has(feature_));
+ return feature_;
+}
+
+CPUFeatures::Feature CPUFeaturesConstIterator::operator++(int) { // Postfix
+ CPUFeatures::Feature result = feature_;
+ ++(*this);
+ return result;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Cpu-Features-vixl.h b/js/src/jit/arm64/vixl/Cpu-Features-vixl.h
new file mode 100644
index 0000000000..b980233bf2
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-Features-vixl.h
@@ -0,0 +1,397 @@
+// Copyright 2018, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_FEATURES_H
+#define VIXL_CPU_FEATURES_H
+
+#include <ostream>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+
+namespace vixl {
+
+
+// clang-format off
+#define VIXL_CPU_FEATURE_LIST(V) \
+ /* If set, the OS traps and emulates MRS accesses to relevant (EL1) ID_* */ \
+ /* registers, so that the detailed feature registers can be read */ \
+ /* directly. */ \
+ V(kIDRegisterEmulation, "ID register emulation", "cpuid") \
+ \
+ V(kFP, "FP", "fp") \
+ V(kNEON, "NEON", "asimd") \
+ V(kCRC32, "CRC32", "crc32") \
+ /* Cryptographic support instructions. */ \
+ V(kAES, "AES", "aes") \
+ V(kSHA1, "SHA1", "sha1") \
+ V(kSHA2, "SHA2", "sha2") \
+ /* A form of PMULL{2} with a 128-bit (1Q) result. */ \
+ V(kPmull1Q, "Pmull1Q", "pmull") \
+ /* Atomic operations on memory: CAS, LDADD, STADD, SWP, etc. */ \
+ V(kAtomics, "Atomics", "atomics") \
+ /* Limited ordering regions: LDLAR, STLLR and their variants. */ \
+ V(kLORegions, "LORegions", NULL) \
+ /* Rounding doubling multiply add/subtract: SQRDMLAH and SQRDMLSH. */ \
+ V(kRDM, "RDM", "asimdrdm") \
+ /* Scalable Vector Extension. */ \
+ V(kSVE, "SVE", "sve") \
+ /* SDOT and UDOT support (in NEON). */ \
+ V(kDotProduct, "DotProduct", "asimddp") \
+ /* Half-precision (FP16) support for FP and NEON, respectively. */ \
+ V(kFPHalf, "FPHalf", "fphp") \
+ V(kNEONHalf, "NEONHalf", "asimdhp") \
+ /* The RAS extension, including the ESB instruction. */ \
+ V(kRAS, "RAS", NULL) \
+ /* Data cache clean to the point of persistence: DC CVAP. */ \
+ V(kDCPoP, "DCPoP", "dcpop") \
+ /* Data cache clean to the point of deep persistence: DC CVADP. */ \
+ V(kDCCVADP, "DCCVADP", NULL) \
+ /* Cryptographic support instructions. */ \
+ V(kSHA3, "SHA3", "sha3") \
+ V(kSHA512, "SHA512", "sha512") \
+ V(kSM3, "SM3", "sm3") \
+ V(kSM4, "SM4", "sm4") \
+ /* Pointer authentication for addresses. */ \
+ V(kPAuth, "PAuth", NULL) \
+ /* Pointer authentication for addresses uses QARMA. */ \
+ V(kPAuthQARMA, "PAuthQARMA", NULL) \
+ /* Generic authentication (using the PACGA instruction). */ \
+ V(kPAuthGeneric, "PAuthGeneric", NULL) \
+ /* Generic authentication uses QARMA. */ \
+ V(kPAuthGenericQARMA, "PAuthGenericQARMA", NULL) \
+ /* JavaScript-style FP -> integer conversion instruction: FJCVTZS. */ \
+ V(kJSCVT, "JSCVT", "jscvt") \
+ /* Complex number support for NEON: FCMLA and FCADD. */ \
+ V(kFcma, "Fcma", "fcma") \
+ /* RCpc-based model (for weaker release consistency): LDAPR and variants. */ \
+ V(kRCpc, "RCpc", "lrcpc") \
+ V(kRCpcImm, "RCpc (imm)", "ilrcpc") \
+ /* Flag manipulation instructions: SETF{8,16}, CFINV, RMIF. */ \
+ V(kFlagM, "FlagM", "flagm") \
+ /* Unaligned single-copy atomicity. */ \
+ V(kUSCAT, "USCAT", "uscat") \
+ /* FP16 fused multiply-add or -subtract long: FMLAL{2}, FMLSL{2}. */ \
+ V(kFHM, "FHM", "asimdfhm") \
+ /* Data-independent timing (for selected instructions). */ \
+ V(kDIT, "DIT", "dit") \
+ /* Branch target identification. */ \
+ V(kBTI, "BTI", NULL) \
+ /* Flag manipulation instructions: {AX,XA}FLAG */ \
+ V(kAXFlag, "AXFlag", NULL) \
+  /* Random number generation extension. */                                   \
+ V(kRNG, "RNG", NULL) \
+ /* Floating-point round to {32,64}-bit integer. */ \
+  V(kFrintToFixedSizedInt, "Frint (bounded)", NULL)
+// clang-format on
+
+
+class CPUFeaturesConstIterator;
+
+// A representation of the set of features known to be supported by the target
+// device. Each feature is represented by a simple boolean flag.
+//
+// - When the Assembler is asked to assemble an instruction, it asserts (in
+// debug mode) that the necessary features are available.
+//
+// - TODO: The MacroAssembler relies on the Assembler's assertions, but in
+// some cases it may be useful for macros to generate a fall-back sequence
+// in case features are not available.
+//
+// - The Simulator assumes by default that all features are available, but it
+// is possible to configure it to fail if the simulated code uses features
+// that are not enabled.
+//
+// The Simulator also offers pseudo-instructions to allow features to be
+// enabled and disabled dynamically. This is useful when you want to ensure
+// that some features are constrained to certain areas of code.
+//
+// - The base Disassembler knows nothing about CPU features, but the
+// PrintDisassembler can be configured to annotate its output with warnings
+// about unavailable features. The Simulator uses this feature when
+// instruction trace is enabled.
+//
+// - The Decoder-based components -- the Simulator and PrintDisassembler --
+// rely on a CPUFeaturesAuditor visitor. This visitor keeps a list of
+// features actually encountered so that a large block of code can be
+// examined (either directly or through simulation), and the required
+// features analysed later.
+//
+// Expected usage:
+//
+// // By default, VIXL uses CPUFeatures::AArch64LegacyBaseline() for
+// // compatibility with older versions of VIXL.
+// MacroAssembler masm;
+//
+// // Generate code only for the current CPU.
+// masm.SetCPUFeatures(CPUFeatures::InferFromOS());
+//
+// // Turn off feature checking entirely.
+// masm.SetCPUFeatures(CPUFeatures::All());
+//
+// Feature set manipulation:
+//
+// CPUFeatures f; // The default constructor gives an empty set.
+// // Individual features can be added (or removed).
+// f.Combine(CPUFeatures::kFP, CPUFeatures::kNEON, CPUFeatures::kAES);
+// f.Remove(CPUFeatures::kNEON);
+//
+// // Some helpers exist for extensions that provide several features.
+// f.Remove(CPUFeatures::All());
+// f.Combine(CPUFeatures::AArch64LegacyBaseline());
+//
+// // Chained construction is also possible.
+// CPUFeatures g =
+// f.With(CPUFeatures::kPmull1Q).Without(CPUFeatures::kCRC32);
+//
+// // Features can be queried. Where multiple features are given, they are
+// // combined with logical AND.
+// if (f.Has(CPUFeatures::kNEON)) { ... }
+// if (f.Has(CPUFeatures::kFP, CPUFeatures::kNEON)) { ... }
+// if (f.Has(g)) { ... }
+// // If the empty set is requested, the result is always 'true'.
+// VIXL_ASSERT(f.Has(CPUFeatures()));
+//
+// // For debug and reporting purposes, features can be enumerated (or
+// // printed directly):
+// std::cout << CPUFeatures::kNEON; // Prints something like "NEON".
+// std::cout << f; // Prints something like "FP, NEON, CRC32".
+class CPUFeatures {
+ public:
+ // clang-format off
+ // Individual features.
+ // These should be treated as opaque tokens. User code should not rely on
+ // specific numeric values or ordering.
+ enum Feature {
+ // Refer to VIXL_CPU_FEATURE_LIST (above) for the list of feature names that
+ // this class supports.
+
+ kNone = -1,
+#define VIXL_DECLARE_FEATURE(SYMBOL, NAME, CPUINFO) SYMBOL,
+ VIXL_CPU_FEATURE_LIST(VIXL_DECLARE_FEATURE)
+#undef VIXL_DECLARE_FEATURE
+ kNumberOfFeatures
+ };
+ // clang-format on
+
+ // By default, construct with no features enabled.
+ CPUFeatures() : features_(0) {}
+
+ // Construct with some features already enabled.
+ CPUFeatures(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Construct with all features enabled. This can be used to disable feature
+ // checking: `Has(...)` returns true regardless of the argument.
+ static CPUFeatures All();
+
+ // Construct an empty CPUFeatures. This is equivalent to the default
+ // constructor, but is provided for symmetry and convenience.
+ static CPUFeatures None() { return CPUFeatures(); }
+
+  // The presence of these features was assumed by versions of VIXL before
+  // this API was added, so using this set by default ensures API
+  // compatibility.
+ static CPUFeatures AArch64LegacyBaseline() {
+ return CPUFeatures(kFP, kNEON, kCRC32);
+ }
+
+ // Construct a new CPUFeatures object using ID registers. This assumes that
+ // kIDRegisterEmulation is present.
+ static CPUFeatures InferFromIDRegisters();
+
+ enum QueryIDRegistersOption {
+ kDontQueryIDRegisters,
+ kQueryIDRegistersIfAvailable
+ };
+
+ // Construct a new CPUFeatures object based on what the OS reports.
+ static CPUFeatures InferFromOS(
+ QueryIDRegistersOption option = kQueryIDRegistersIfAvailable);
+
+ // Combine another CPUFeatures object into this one. Features that already
+ // exist in this set are left unchanged.
+ void Combine(const CPUFeatures& other);
+
+ // Combine specific features into this set. Features that already exist in
+ // this set are left unchanged.
+ void Combine(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Remove features in another CPUFeatures object from this one.
+ void Remove(const CPUFeatures& other);
+
+ // Remove specific features from this set.
+ void Remove(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone);
+
+ // Chaining helpers for convenient construction.
+ CPUFeatures With(const CPUFeatures& other) const;
+ CPUFeatures With(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+ CPUFeatures Without(const CPUFeatures& other) const;
+ CPUFeatures Without(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+
+ // Query features.
+ // Note that an empty query (like `Has(kNone)`) always returns true.
+ bool Has(const CPUFeatures& other) const;
+ bool Has(Feature feature0,
+ Feature feature1 = kNone,
+ Feature feature2 = kNone,
+ Feature feature3 = kNone) const;
+
+ // Return the number of enabled features.
+ size_t Count() const;
+ bool HasNoFeatures() const { return Count() == 0; }
+
+ // Check for equivalence.
+ bool operator==(const CPUFeatures& other) const {
+ return Has(other) && other.Has(*this);
+ }
+ bool operator!=(const CPUFeatures& other) const { return !(*this == other); }
+
+ typedef CPUFeaturesConstIterator const_iterator;
+
+ const_iterator begin() const;
+ const_iterator end() const;
+
+ private:
+  // Each bit represents a feature. This field will need to be replaced with
+  // a wider type if more than 64 features are added.
+ uint64_t features_;
+
+ friend std::ostream& operator<<(std::ostream& os,
+ const vixl::CPUFeatures& features);
+};
+
+std::ostream& operator<<(std::ostream& os, vixl::CPUFeatures::Feature feature);
+std::ostream& operator<<(std::ostream& os, const vixl::CPUFeatures& features);
+
+// This is not a proper C++ iterator type, but it simulates enough of
+// ForwardIterator that simple loops can be written (see the example loop
+// after the class definition).
+class CPUFeaturesConstIterator {
+ public:
+ CPUFeaturesConstIterator(const CPUFeatures* cpu_features = NULL,
+ CPUFeatures::Feature start = CPUFeatures::kNone)
+ : cpu_features_(cpu_features), feature_(start) {
+ VIXL_ASSERT(IsValid());
+ }
+
+ bool operator==(const CPUFeaturesConstIterator& other) const;
+ bool operator!=(const CPUFeaturesConstIterator& other) const {
+ return !(*this == other);
+ }
+ CPUFeatures::Feature operator++();
+ CPUFeatures::Feature operator++(int);
+
+ CPUFeatures::Feature operator*() const {
+ VIXL_ASSERT(IsValid());
+ return feature_;
+ }
+
+ // For proper support of C++'s simplest "Iterator" concept, this class would
+ // have to define member types (such as CPUFeaturesIterator::pointer) to make
+ // it appear as if it iterates over Feature objects in memory. That is, we'd
+ // need CPUFeatures::iterator to behave like std::vector<Feature>::iterator.
+ // This is at least partially possible -- the std::vector<bool> specialisation
+ // does something similar -- but it doesn't seem worthwhile for a
+ // special-purpose debug helper, so they are omitted here.
+ private:
+ const CPUFeatures* cpu_features_;
+ CPUFeatures::Feature feature_;
+
+ bool IsValid() const {
+ return ((cpu_features_ == NULL) && (feature_ == CPUFeatures::kNone)) ||
+ cpu_features_->Has(feature_);
+ }
+};
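+
+// Example only (not part of the upstream header): a minimal loop over the
+// features detected at runtime, assuming <iostream> is included.
+//
+//   CPUFeatures features = CPUFeatures::InferFromOS();
+//   for (CPUFeatures::const_iterator it = features.begin();
+//        it != features.end(); ++it) {
+//     std::cout << *it << std::endl;  // Prints a name such as "NEON".
+//   }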
+
+// A convenience scope for temporarily modifying a CPU features object. This
+// allows features to be enabled for short sequences.
+//
+// Expected usage:
+//
+// {
+// CPUFeaturesScope cpu(&masm, CPUFeatures::kCRC32);
+// // This scope can now use CRC32, as well as anything else that was enabled
+// // before the scope.
+//
+// ...
+//
+// // At the end of the scope, the original CPU features are restored.
+// }
+class CPUFeaturesScope {
+ public:
+ // Start a CPUFeaturesScope on any object that implements
+ // `CPUFeatures* GetCPUFeatures()`.
+ template <typename T>
+ explicit CPUFeaturesScope(T* cpu_features_wrapper,
+ CPUFeatures::Feature feature0 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature1 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature2 = CPUFeatures::kNone,
+ CPUFeatures::Feature feature3 = CPUFeatures::kNone)
+ : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+ old_features_(*cpu_features_) {
+ cpu_features_->Combine(feature0, feature1, feature2, feature3);
+ }
+
+ template <typename T>
+ CPUFeaturesScope(T* cpu_features_wrapper, const CPUFeatures& other)
+ : cpu_features_(cpu_features_wrapper->GetCPUFeatures()),
+ old_features_(*cpu_features_) {
+ cpu_features_->Combine(other);
+ }
+
+ ~CPUFeaturesScope() { *cpu_features_ = old_features_; }
+
+ // For advanced usage, the CPUFeatures object can be accessed directly.
+ // The scope will restore the original state when it ends.
+
+ CPUFeatures* GetCPUFeatures() const { return cpu_features_; }
+
+ void SetCPUFeatures(const CPUFeatures& cpu_features) {
+ *cpu_features_ = cpu_features;
+ }
+
+ private:
+ CPUFeatures* const cpu_features_;
+ const CPUFeatures old_features_;
+};
+
+
+} // namespace vixl
+
+#endif // VIXL_CPU_FEATURES_H
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.cpp b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
new file mode 100644
index 0000000000..13b232d5b7
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.cpp
@@ -0,0 +1,252 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+
+#include "jstypes.h"
+
+#if defined(__aarch64__) && (defined(__ANDROID__) || defined(__linux__))
+#include <sys/auxv.h>
+#define VIXL_USE_LINUX_HWCAP 1
+#endif
+
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+
+namespace vixl {
+
+
+const IDRegister::Field AA64PFR0::kFP(16, Field::kSigned);
+const IDRegister::Field AA64PFR0::kAdvSIMD(20, Field::kSigned);
+const IDRegister::Field AA64PFR0::kSVE(32);
+const IDRegister::Field AA64PFR0::kDIT(48);
+
+const IDRegister::Field AA64PFR1::kBT(0);
+
+const IDRegister::Field AA64ISAR0::kAES(4);
+const IDRegister::Field AA64ISAR0::kSHA1(8);
+const IDRegister::Field AA64ISAR0::kSHA2(12);
+const IDRegister::Field AA64ISAR0::kCRC32(16);
+const IDRegister::Field AA64ISAR0::kAtomic(20);
+const IDRegister::Field AA64ISAR0::kRDM(28);
+const IDRegister::Field AA64ISAR0::kSHA3(32);
+const IDRegister::Field AA64ISAR0::kSM3(36);
+const IDRegister::Field AA64ISAR0::kSM4(40);
+const IDRegister::Field AA64ISAR0::kDP(44);
+const IDRegister::Field AA64ISAR0::kFHM(48);
+const IDRegister::Field AA64ISAR0::kTS(52);
+
+const IDRegister::Field AA64ISAR1::kDPB(0);
+const IDRegister::Field AA64ISAR1::kAPA(4);
+const IDRegister::Field AA64ISAR1::kAPI(8);
+const IDRegister::Field AA64ISAR1::kJSCVT(12);
+const IDRegister::Field AA64ISAR1::kFCMA(16);
+const IDRegister::Field AA64ISAR1::kLRCPC(20);
+const IDRegister::Field AA64ISAR1::kGPA(24);
+const IDRegister::Field AA64ISAR1::kGPI(28);
+const IDRegister::Field AA64ISAR1::kFRINTTS(32);
+const IDRegister::Field AA64ISAR1::kSB(36);
+const IDRegister::Field AA64ISAR1::kSPECRES(40);
+
+const IDRegister::Field AA64MMFR1::kLO(16);
+
+CPUFeatures AA64PFR0::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kFP) >= 0) f.Combine(CPUFeatures::kFP);
+ if (Get(kFP) >= 1) f.Combine(CPUFeatures::kFPHalf);
+ if (Get(kAdvSIMD) >= 0) f.Combine(CPUFeatures::kNEON);
+ if (Get(kAdvSIMD) >= 1) f.Combine(CPUFeatures::kNEONHalf);
+ if (Get(kSVE) >= 1) f.Combine(CPUFeatures::kSVE);
+ if (Get(kDIT) >= 1) f.Combine(CPUFeatures::kDIT);
+ return f;
+}
+
+CPUFeatures AA64PFR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kBT) >= 1) f.Combine(CPUFeatures::kBTI);
+ return f;
+}
+
+CPUFeatures AA64ISAR0::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kAES) >= 1) f.Combine(CPUFeatures::kAES);
+ if (Get(kAES) >= 2) f.Combine(CPUFeatures::kPmull1Q);
+ if (Get(kSHA1) >= 1) f.Combine(CPUFeatures::kSHA1);
+ if (Get(kSHA2) >= 1) f.Combine(CPUFeatures::kSHA2);
+ if (Get(kSHA2) >= 2) f.Combine(CPUFeatures::kSHA512);
+ if (Get(kCRC32) >= 1) f.Combine(CPUFeatures::kCRC32);
+ if (Get(kAtomic) >= 1) f.Combine(CPUFeatures::kAtomics);
+ if (Get(kRDM) >= 1) f.Combine(CPUFeatures::kRDM);
+ if (Get(kSHA3) >= 1) f.Combine(CPUFeatures::kSHA3);
+ if (Get(kSM3) >= 1) f.Combine(CPUFeatures::kSM3);
+ if (Get(kSM4) >= 1) f.Combine(CPUFeatures::kSM4);
+ if (Get(kDP) >= 1) f.Combine(CPUFeatures::kDotProduct);
+ if (Get(kFHM) >= 1) f.Combine(CPUFeatures::kFHM);
+ if (Get(kTS) >= 1) f.Combine(CPUFeatures::kFlagM);
+ if (Get(kTS) >= 2) f.Combine(CPUFeatures::kAXFlag);
+ return f;
+}
+
+CPUFeatures AA64ISAR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kDPB) >= 1) f.Combine(CPUFeatures::kDCPoP);
+ if (Get(kJSCVT) >= 1) f.Combine(CPUFeatures::kJSCVT);
+ if (Get(kFCMA) >= 1) f.Combine(CPUFeatures::kFcma);
+ if (Get(kLRCPC) >= 1) f.Combine(CPUFeatures::kRCpc);
+ if (Get(kLRCPC) >= 2) f.Combine(CPUFeatures::kRCpcImm);
+ if (Get(kFRINTTS) >= 1) f.Combine(CPUFeatures::kFrintToFixedSizedInt);
+
+ if (Get(kAPI) >= 1) f.Combine(CPUFeatures::kPAuth);
+ if (Get(kAPA) >= 1) f.Combine(CPUFeatures::kPAuth, CPUFeatures::kPAuthQARMA);
+ if (Get(kGPI) >= 1) f.Combine(CPUFeatures::kPAuthGeneric);
+ if (Get(kGPA) >= 1) {
+ f.Combine(CPUFeatures::kPAuthGeneric, CPUFeatures::kPAuthGenericQARMA);
+ }
+ return f;
+}
+
+CPUFeatures AA64MMFR1::GetCPUFeatures() const {
+ CPUFeatures f;
+ if (Get(kLO) >= 1) f.Combine(CPUFeatures::kLORegions);
+ return f;
+}
+
+int IDRegister::Get(IDRegister::Field field) const {
+ int msb = field.GetMsb();
+ int lsb = field.GetLsb();
+ VIXL_STATIC_ASSERT(static_cast<size_t>(Field::kMaxWidthInBits) <
+ (sizeof(int) * kBitsPerByte));
+ switch (field.GetType()) {
+ case Field::kSigned:
+ return static_cast<int>(ExtractSignedBitfield64(msb, lsb, value_));
+ case Field::kUnsigned:
+ return static_cast<int>(ExtractUnsignedBitfield64(msb, lsb, value_));
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+}
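A short worked example of why the signed extraction matters (illustrative only; the register value is hand-built and the snippet assumes the vixl namespace is open): AA64PFR0.FP occupies bits [19:16] and encodes 0b1111 for "not implemented", so sign-extension turns the field into -1 and the `>= 0` tests in AA64PFR0::GetCPUFeatures() correctly reject it.

    // Hand-built register value: FP field (bits [19:16]) = 0b1111, i.e. no FP.
    uint64_t pfr0_value = UINT64_C(0xf) << 16;
    CPUFeatures f = AA64PFR0(pfr0_value).GetCPUFeatures();
    // The signed field reads back as -1, so neither FP test passes.
    VIXL_ASSERT(!f.Has(CPUFeatures::kFP));
    VIXL_ASSERT(!f.Has(CPUFeatures::kFPHalf));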
+
+CPUFeatures CPU::InferCPUFeaturesFromIDRegisters() {
+ CPUFeatures f;
+#define VIXL_COMBINE_ID_REG(NAME) f.Combine(Read##NAME().GetCPUFeatures());
+ VIXL_AARCH64_ID_REG_LIST(VIXL_COMBINE_ID_REG)
+#undef VIXL_COMBINE_ID_REG
+ return f;
+}
+
+CPUFeatures CPU::InferCPUFeaturesFromOS(
+ CPUFeatures::QueryIDRegistersOption option) {
+ CPUFeatures features;
+
+#if VIXL_USE_LINUX_HWCAP
+ // Map each set bit onto a feature. Ideally, we'd use HWCAP_* macros rather
+ // than explicit bits, but explicit bits allow us to identify features that
+ // the toolchain doesn't know about.
+ static const CPUFeatures::Feature kFeatureBits[] = {
+ // Bits 0-7
+ CPUFeatures::kFP,
+ CPUFeatures::kNEON,
+ CPUFeatures::kNone, // "EVTSTRM", which VIXL doesn't track.
+ CPUFeatures::kAES,
+ CPUFeatures::kPmull1Q,
+ CPUFeatures::kSHA1,
+ CPUFeatures::kSHA2,
+ CPUFeatures::kCRC32,
+ // Bits 8-15
+ CPUFeatures::kAtomics,
+ CPUFeatures::kFPHalf,
+ CPUFeatures::kNEONHalf,
+ CPUFeatures::kIDRegisterEmulation,
+ CPUFeatures::kRDM,
+ CPUFeatures::kJSCVT,
+ CPUFeatures::kFcma,
+ CPUFeatures::kRCpc,
+ // Bits 16-23
+ CPUFeatures::kDCPoP,
+ CPUFeatures::kSHA3,
+ CPUFeatures::kSM3,
+ CPUFeatures::kSM4,
+ CPUFeatures::kDotProduct,
+ CPUFeatures::kSHA512,
+ CPUFeatures::kSVE,
+ CPUFeatures::kFHM,
+ // Bits 24-27
+ CPUFeatures::kDIT,
+ CPUFeatures::kUSCAT,
+ CPUFeatures::kRCpcImm,
+ CPUFeatures::kFlagM
+ // Bits 28-31 are unassigned.
+ };
+ static const size_t kFeatureBitCount =
+ sizeof(kFeatureBits) / sizeof(kFeatureBits[0]);
+
+ // Mozilla change: Set the default for the simulator.
+#ifdef JS_SIMULATOR_ARM64
+ unsigned long auxv = ~(0UL); // Enable all features for the Simulator.
+#else
+ unsigned long auxv = getauxval(AT_HWCAP); // NOLINT(runtime/int)
+#endif
+
+ VIXL_STATIC_ASSERT(kFeatureBitCount < (sizeof(auxv) * kBitsPerByte));
+ for (size_t i = 0; i < kFeatureBitCount; i++) {
+ if (auxv & (1UL << i)) features.Combine(kFeatureBits[i]);
+ }
+#endif // VIXL_USE_LINUX_HWCAP
+
+ if ((option == CPUFeatures::kQueryIDRegistersIfAvailable) &&
+ (features.Has(CPUFeatures::kIDRegisterEmulation))) {
+ features.Combine(InferCPUFeaturesFromIDRegisters());
+ }
+ return features;
+}
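A caller-side sketch of the intended use (not part of this patch): query once at start-up and gate code generation on the result. Note that under JS_SIMULATOR_ARM64 the function reports every HWCAP-mapped feature, because `auxv` is forced to all-ones above.

    // Sketch only: detect host features and use them to gate emitted code.
    vixl::CPUFeatures available = vixl::CPU::InferCPUFeaturesFromOS();
    if (available.Has(vixl::CPUFeatures::kCRC32)) {
      // Safe to emit CRC32{B,H,W,X} on this host.
    }
    if (available.Has(vixl::CPUFeatures::kAtomics)) {
      // Safe to emit LSE atomics (LDADD, CAS, SWP, ...).
    }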
+
+
+#ifdef __aarch64__
+#define VIXL_READ_ID_REG(NAME) \
+ NAME CPU::Read##NAME() { \
+ uint64_t value = 0; \
+ __asm__("mrs %0, ID_" #NAME "_EL1" : "=r"(value)); \
+ return NAME(value); \
+ }
+#else // __aarch64__
+#define VIXL_READ_ID_REG(NAME) \
+ NAME CPU::Read##NAME() { \
+ /* TODO: Use VIXL_UNREACHABLE once it works in release builds. */ \
+ VIXL_ABORT(); \
+ }
+#endif // __aarch64__
+
+VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)
+
+#undef VIXL_READ_ID_REG
+
+
+// Initialise to the smallest possible cache line size.
+unsigned CPU::dcache_line_size_ = 1;
+unsigned CPU::icache_line_size_ = 1;
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Cpu-vixl.h b/js/src/jit/arm64/vixl/Cpu-vixl.h
new file mode 100644
index 0000000000..4db51aad6b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Cpu-vixl.h
@@ -0,0 +1,241 @@
+// Copyright 2014, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_CPU_AARCH64_H
+#define VIXL_CPU_AARCH64_H
+
+#include "jit/arm64/vixl/Cpu-Features-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#ifndef VIXL_INCLUDE_TARGET_AARCH64
+// The supporting .cc file is only compiled when the A64 target is selected.
+// Throw an explicit error now to avoid a harder-to-debug linker error later.
+//
+// These helpers _could_ work on any AArch64 host, even when generating AArch32
+// code, but we don't support this because the available features may differ
+// between AArch32 and AArch64 on the same platform, so basing AArch32 code
+// generation on aarch64::CPU features is probably broken.
+#error cpu-aarch64.h requires VIXL_INCLUDE_TARGET_AARCH64 (scons target=a64).
+#endif
+
+namespace vixl {
+
+// A CPU ID register, for use with CPUFeatures::kIDRegisterEmulation. Fields
+// specific to each register are described in relevant subclasses.
+class IDRegister {
+ protected:
+ explicit IDRegister(uint64_t value = 0) : value_(value) {}
+
+ class Field {
+ public:
+ enum Type { kUnsigned, kSigned };
+
+ explicit Field(int lsb, Type type = kUnsigned) : lsb_(lsb), type_(type) {}
+
+ static const int kMaxWidthInBits = 4;
+
+ int GetWidthInBits() const {
+ // All current ID fields have four bits.
+ return kMaxWidthInBits;
+ }
+ int GetLsb() const { return lsb_; }
+ int GetMsb() const { return lsb_ + GetWidthInBits() - 1; }
+ Type GetType() const { return type_; }
+
+ private:
+ int lsb_;
+ Type type_;
+ };
+
+ public:
+ // Extract the specified field, performing sign-extension for signed fields.
+ // This allows us to implement the 'value >= number' detection mechanism
+ // recommended by the Arm ARM, for both signed and unsigned fields.
+ int Get(Field field) const;
+
+ private:
+ uint64_t value_;
+};
+
+class AA64PFR0 : public IDRegister {
+ public:
+ explicit AA64PFR0(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kFP;
+ static const Field kAdvSIMD;
+ static const Field kSVE;
+ static const Field kDIT;
+};
+
+class AA64PFR1 : public IDRegister {
+ public:
+ explicit AA64PFR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kBT;
+};
+
+class AA64ISAR0 : public IDRegister {
+ public:
+ explicit AA64ISAR0(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kAES;
+ static const Field kSHA1;
+ static const Field kSHA2;
+ static const Field kCRC32;
+ static const Field kAtomic;
+ static const Field kRDM;
+ static const Field kSHA3;
+ static const Field kSM3;
+ static const Field kSM4;
+ static const Field kDP;
+ static const Field kFHM;
+ static const Field kTS;
+};
+
+class AA64ISAR1 : public IDRegister {
+ public:
+ explicit AA64ISAR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kDPB;
+ static const Field kAPA;
+ static const Field kAPI;
+ static const Field kJSCVT;
+ static const Field kFCMA;
+ static const Field kLRCPC;
+ static const Field kGPA;
+ static const Field kGPI;
+ static const Field kFRINTTS;
+ static const Field kSB;
+ static const Field kSPECRES;
+};
+
+class AA64MMFR1 : public IDRegister {
+ public:
+ explicit AA64MMFR1(uint64_t value) : IDRegister(value) {}
+
+ CPUFeatures GetCPUFeatures() const;
+
+ private:
+ static const Field kLO;
+};
+
+class CPU {
+ public:
+ // Initialise CPU support.
+ static void SetUp();
+
+ // Ensures the data at a given address and with a given size is the same for
+ // the I and D caches. I and D caches are not automatically coherent on ARM
+ // so this operation is required before any dynamically generated code can
+ // safely run.
+ static void EnsureIAndDCacheCoherency(void* address, size_t length);
+
+ // Flush the local instruction pipeline, forcing a reload of any instructions
+ // beyond this barrier from the icache.
+ static void FlushExecutionContext();
+
+ // Read and interpret the ID registers. This requires
+ // CPUFeatures::kIDRegisterEmulation, and therefore cannot be called on
+ // non-AArch64 platforms.
+ static CPUFeatures InferCPUFeaturesFromIDRegisters();
+
+ // Read and interpret CPUFeatures reported by the OS. Failed queries (or
+ // unsupported platforms) return an empty list. Note that this is
+ // indistinguishable from a successful query on a platform that advertises no
+ // features.
+ //
+ // Non-AArch64 hosts are considered to be unsupported platforms, and this
+ // function returns an empty list.
+ static CPUFeatures InferCPUFeaturesFromOS(
+ CPUFeatures::QueryIDRegistersOption option =
+ CPUFeatures::kQueryIDRegistersIfAvailable);
+
+ // Handle tagged pointers.
+ template <typename T>
+ static T SetPointerTag(T pointer, uint64_t tag) {
+ VIXL_ASSERT(IsUintN(kAddressTagWidth, tag));
+
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ raw = (raw & ~kAddressTagMask) | (tag << kAddressTagOffset);
+ return (T)raw;
+ }
+
+ template <typename T>
+ static uint64_t GetPointerTag(T pointer) {
+ // Use C-style casts to get static_cast behaviour for integral types (T),
+ // and reinterpret_cast behaviour for other types.
+
+ uint64_t raw = (uint64_t)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(raw));
+
+ return (raw & kAddressTagMask) >> kAddressTagOffset;
+ }
+
+ private:
+#define VIXL_AARCH64_ID_REG_LIST(V) \
+ V(AA64PFR0) \
+ V(AA64PFR1) \
+ V(AA64ISAR0) \
+ V(AA64ISAR1) \
+ V(AA64MMFR1)
+
+#define VIXL_READ_ID_REG(NAME) static NAME Read##NAME();
+ // On native AArch64 platforms, read the named CPU ID registers. These require
+ // CPUFeatures::kIDRegisterEmulation, and should not be called on non-AArch64
+ // platforms.
+ VIXL_AARCH64_ID_REG_LIST(VIXL_READ_ID_REG)
+#undef VIXL_READ_ID_REG
+
+ // Return the content of the cache type register.
+ static uint32_t GetCacheType();
+
+ // I and D cache line size in bytes.
+ static unsigned icache_line_size_;
+ static unsigned dcache_line_size_;
+};
+
+} // namespace vixl
+
+#endif // VIXL_CPU_AARCH64_H
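A small sketch of the pointer-tagging helpers declared above (not part of this patch). AArch64 ignores the top byte of an address when top-byte-ignore (TBI) is in effect, so a tag can be stashed there and recovered later; the values below are arbitrary.

    int value = 42;
    // Store the tag 0x5a in the top byte of the pointer (must fit in 8 bits).
    int* tagged = vixl::CPU::SetPointerTag(&value, 0x5a);
    uint64_t tag = vixl::CPU::GetPointerTag(tagged);  // tag == 0x5a
    // Only dereference `tagged` where TBI applies; otherwise clear the tag first:
    int* plain = vixl::CPU::SetPointerTag(tagged, 0);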
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.cpp b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
new file mode 100644
index 0000000000..fa3e15601e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.cpp
@@ -0,0 +1,1535 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY ARM LIMITED AND CONTRIBUTORS "AS IS" AND ANY
+// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL ARM LIMITED BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
+// OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
+// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+
+#include "mozilla/Vector.h"
+
+#include "js/AllocPolicy.h"
+
+namespace vixl {
+
+// List of commands supported by the debugger.
+#define DEBUG_COMMAND_LIST(C) \
+C(HelpCommand) \
+C(ContinueCommand) \
+C(StepCommand) \
+C(DisasmCommand) \
+C(PrintCommand) \
+C(ExamineCommand)
+
+// Debugger command lines are broken up into tokens of different types to make
+// processing easier later on.
+class Token {
+ public:
+ virtual ~Token() {}
+
+ // Token type.
+ virtual bool IsRegister() const { return false; }
+ virtual bool IsFPRegister() const { return false; }
+ virtual bool IsIdentifier() const { return false; }
+ virtual bool IsAddress() const { return false; }
+ virtual bool IsInteger() const { return false; }
+ virtual bool IsFormat() const { return false; }
+ virtual bool IsUnknown() const { return false; }
+ // Token properties.
+ virtual bool CanAddressMemory() const { return false; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const = 0;
+ virtual void Print(FILE* out = stdout) const = 0;
+
+ static Token* Tokenize(const char* arg);
+};
+
+typedef mozilla::Vector<Token*, 0, js::SystemAllocPolicy> TokenVector;
+
+// Tokens often hold one value.
+template<typename T> class ValueToken : public Token {
+ public:
+ explicit ValueToken(T value) : value_(value) {}
+ ValueToken() {}
+
+ T value() const { return value_; }
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ protected:
+ T value_;
+};
+
+// Integer registers (X or W) and their aliases.
+// Format: wn or xn with 0 <= n < 32 or a name in the aliases list.
+class RegisterToken : public ValueToken<const Register> {
+ public:
+ explicit RegisterToken(const Register reg)
+ : ValueToken<const Register>(reg) {}
+
+ virtual bool IsRegister() const override { return true; }
+ virtual bool CanAddressMemory() const override { return value().Is64Bits(); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+ const char* Name() const;
+
+ static Token* Tokenize(const char* arg);
+ static RegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsRegister());
+ return reinterpret_cast<RegisterToken*>(tok);
+ }
+
+ private:
+ static const int kMaxAliasNumber = 4;
+ static const char* kXAliases[kNumberOfRegisters][kMaxAliasNumber];
+ static const char* kWAliases[kNumberOfRegisters][kMaxAliasNumber];
+};
+
+// Floating point registers (D or S).
+// Format: sn or dn with 0 <= n < 32.
+class FPRegisterToken : public ValueToken<const FPRegister> {
+ public:
+ explicit FPRegisterToken(const FPRegister fpreg)
+ : ValueToken<const FPRegister>(fpreg) {}
+
+ virtual bool IsFPRegister() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static FPRegisterToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFPRegister());
+ return reinterpret_cast<FPRegisterToken*>(tok);
+ }
+};
+
+
+// Non-register identifiers.
+// Format: Alphanumeric string starting with a letter.
+class IdentifierToken : public ValueToken<char*> {
+ public:
+ explicit IdentifierToken(const char* name) {
+ size_t size = strlen(name) + 1;
+ value_ = js_pod_malloc<char>(size);
+ strncpy(value_, name, size);
+ }
+ virtual ~IdentifierToken() { js_free(value_); }
+
+ virtual bool IsIdentifier() const override { return true; }
+ virtual bool CanAddressMemory() const override { return strcmp(value(), "pc") == 0; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static IdentifierToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsIdentifier());
+ return reinterpret_cast<IdentifierToken*>(tok);
+ }
+};
+
+// 64-bit address literal.
+// Format: 0x... with up to 16 hexadecimal digits.
+class AddressToken : public ValueToken<uint8_t*> {
+ public:
+ explicit AddressToken(uint8_t* address) : ValueToken<uint8_t*>(address) {}
+
+ virtual bool IsAddress() const override { return true; }
+ virtual bool CanAddressMemory() const override { return true; }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override;
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static AddressToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsAddress());
+ return reinterpret_cast<AddressToken*>(tok);
+ }
+};
+
+
+// 64-bit decimal integer literal.
+// Format: n.
+class IntegerToken : public ValueToken<int64_t> {
+ public:
+ explicit IntegerToken(int64_t value) : ValueToken<int64_t>(value) {}
+
+ virtual bool IsInteger() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ static Token* Tokenize(const char* arg);
+ static IntegerToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsInteger());
+ return reinterpret_cast<IntegerToken*>(tok);
+ }
+};
+
+// Literal describing how to print a chunk of data (up to 64 bits).
+// Format: .ln
+// where l (letter) is one of
+// * x: hexadecimal
+// * s: signed integer
+// * u: unsigned integer
+// * f: floating point
+// * i: instruction
+// and n (size) is one of 8, 16, 32 and 64. n should be omitted for
+// instructions.
+class FormatToken : public Token {
+ public:
+ FormatToken() {}
+
+ virtual bool IsFormat() const override { return true; }
+ virtual int SizeOf() const = 0;
+ virtual char type_code() const = 0;
+ virtual void PrintData(void* data, FILE* out = stdout) const = 0;
+ virtual void Print(FILE* out = stdout) const override = 0;
+
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ static Token* Tokenize(const char* arg);
+ static FormatToken* Cast(Token* tok) {
+ VIXL_ASSERT(tok->IsFormat());
+ return reinterpret_cast<FormatToken*>(tok);
+ }
+};
+
+
+template<typename T> class Format : public FormatToken {
+ public:
+ Format(const char* fmt, char type_code) : fmt_(fmt), type_code_(type_code) {}
+
+ virtual int SizeOf() const override { return sizeof(T); }
+ virtual char type_code() const override { return type_code_; }
+ virtual void PrintData(void* data, FILE* out = stdout) const override {
+ T value;
+ memcpy(&value, data, sizeof(value));
+ fprintf(out, fmt_, value);
+ }
+ virtual void Print(FILE* out = stdout) const override;
+
+ private:
+ const char* fmt_;
+ char type_code_;
+};
+
+// Tokens which don't fit any of the above.
+class UnknownToken : public Token {
+ public:
+ explicit UnknownToken(const char* arg) {
+ size_t size = strlen(arg) + 1;
+ unknown_ = js_pod_malloc<char>(size);
+ strncpy(unknown_, arg, size);
+ }
+ virtual ~UnknownToken() { js_free(unknown_); }
+ virtual uint8_t* ToAddress(Debugger* debugger) const override {
+ USE(debugger);
+ VIXL_ABORT();
+ }
+
+ virtual bool IsUnknown() const override { return true; }
+ virtual void Print(FILE* out = stdout) const override;
+
+ private:
+ char* unknown_;
+};
+
+
+// All debugger commands must subclass DebugCommand and implement Run, Print
+// and Build. Commands must also define kHelp and kAliases.
+class DebugCommand {
+ public:
+ explicit DebugCommand(Token* name) : name_(IdentifierToken::Cast(name)) {}
+ DebugCommand() : name_(NULL) {}
+ virtual ~DebugCommand() { js_delete(name_); }
+
+ const char* name() { return name_->value(); }
+ // Run the command on the given debugger. The command returns true if
+ // execution should move to the next instruction.
+ virtual bool Run(Debugger * debugger) = 0;
+ virtual void Print(FILE* out = stdout);
+
+ static bool Match(const char* name, const char** aliases);
+ static DebugCommand* Parse(char* line);
+ static void PrintHelp(const char** aliases,
+ const char* args,
+ const char* help);
+
+ private:
+ IdentifierToken* name_;
+};
+
+// For all commands below, see their respective kHelp and kAliases definitions
+// later in this file.
+class HelpCommand : public DebugCommand {
+ public:
+ explicit HelpCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class ContinueCommand : public DebugCommand {
+ public:
+ explicit ContinueCommand(Token* name) : DebugCommand(name) {}
+
+ virtual bool Run(Debugger* debugger) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class StepCommand : public DebugCommand {
+ public:
+ StepCommand(Token* name, IntegerToken* count)
+ : DebugCommand(name), count_(count) {}
+ virtual ~StepCommand() { js_delete(count_); }
+
+ int64_t count() { return count_->value(); }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ IntegerToken* count_;
+};
+
+class DisasmCommand : public DebugCommand {
+ public:
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+};
+
+
+class PrintCommand : public DebugCommand {
+ public:
+ PrintCommand(Token* name, Token* target, FormatToken* format)
+ : DebugCommand(name), target_(target), format_(format) {}
+ virtual ~PrintCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+};
+
+class ExamineCommand : public DebugCommand {
+ public:
+ ExamineCommand(Token* name,
+ Token* target,
+ FormatToken* format,
+ IntegerToken* count)
+ : DebugCommand(name), target_(target), format_(format), count_(count) {}
+ virtual ~ExamineCommand() {
+ js_delete(target_);
+ js_delete(format_);
+ js_delete(count_);
+ }
+
+ Token* target() { return target_; }
+ FormatToken* format() { return format_; }
+ IntegerToken* count() { return count_; }
+ virtual bool Run(Debugger* debugger) override;
+ virtual void Print(FILE* out = stdout) override;
+
+ static DebugCommand* Build(TokenVector&& args);
+
+ static const char* kHelp;
+ static const char* kAliases[];
+ static const char* kArguments;
+
+ private:
+ Token* target_;
+ FormatToken* format_;
+ IntegerToken* count_;
+};
+
+// Commands whose name does not match any of the known commands.
+class UnknownCommand : public DebugCommand {
+ public:
+ explicit UnknownCommand(TokenVector&& args) : args_(std::move(args)) {}
+ virtual ~UnknownCommand();
+
+ virtual bool Run(Debugger* debugger) override;
+
+ private:
+ TokenVector args_;
+};
+
+// Commands whose name matches a known command but whose syntax is invalid.
+class InvalidCommand : public DebugCommand {
+ public:
+ InvalidCommand(TokenVector&& args, int index, const char* cause)
+ : args_(std::move(args)), index_(index), cause_(cause) {}
+ virtual ~InvalidCommand();
+
+ virtual bool Run(Debugger* debugger) override;
+
+ private:
+ TokenVector args_;
+ int index_;
+ const char* cause_;
+};
+
+const char* HelpCommand::kAliases[] = { "help", NULL };
+const char* HelpCommand::kArguments = NULL;
+const char* HelpCommand::kHelp = " Print this help.";
+
+const char* ContinueCommand::kAliases[] = { "continue", "c", NULL };
+const char* ContinueCommand::kArguments = NULL;
+const char* ContinueCommand::kHelp = " Resume execution.";
+
+const char* StepCommand::kAliases[] = { "stepi", "si", NULL };
+const char* StepCommand::kArguments = "[n = 1]";
+const char* StepCommand::kHelp = " Execute the next n instruction(s).";
+
+const char* DisasmCommand::kAliases[] = { "disasm", "di", NULL };
+const char* DisasmCommand::kArguments = "[n = 10]";
+const char* DisasmCommand::kHelp =
+ " Disassemble n instruction(s) at pc.\n"
+ " This command is equivalent to x pc.i [n = 10]."
+;
+
+const char* PrintCommand::kAliases[] = { "print", "p", NULL };
+const char* PrintCommand::kArguments = "<entity>[.format]";
+const char* PrintCommand::kHelp =
+ " Print the given entity according to the given format.\n"
+ " The format parameter only affects individual registers; it is ignored\n"
+ " for other entities.\n"
+ " <entity> can be one of the following:\n"
+ " * A register name (such as x0, s1, ...).\n"
+ " * 'regs', to print all integer (W and X) registers.\n"
+ " * 'fpregs' to print all floating-point (S and D) registers.\n"
+ " * 'sysregs' to print all system registers (including NZCV).\n"
+ " * 'pc' to print the current program counter.\n"
+;
+
+const char* ExamineCommand::kAliases[] = { "m", "mem", "x", NULL };
+const char* ExamineCommand::kArguments = "<addr>[.format] [n = 10]";
+const char* ExamineCommand::kHelp =
+ " Examine memory. Print n items of memory at address <addr> according to\n"
+ " the given [.format].\n"
+ " Addr can be an immediate address, a register name or pc.\n"
+ " Format is made of a type letter: 'x' (hexadecimal), 's' (signed), 'u'\n"
+ " (unsigned), 'f' (floating point), i (instruction) and a size in bits\n"
+ " when appropriate (8, 16, 32, 64)\n"
+ " E.g 'x sp.x64' will print 10 64-bit words from the stack in\n"
+ " hexadecimal format."
+;
+
+const char* RegisterToken::kXAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "x0", NULL },
+ { "x1", NULL },
+ { "x2", NULL },
+ { "x3", NULL },
+ { "x4", NULL },
+ { "x5", NULL },
+ { "x6", NULL },
+ { "x7", NULL },
+ { "x8", NULL },
+ { "x9", NULL },
+ { "x10", NULL },
+ { "x11", NULL },
+ { "x12", NULL },
+ { "x13", NULL },
+ { "x14", NULL },
+ { "x15", NULL },
+ { "ip0", "x16", NULL },
+ { "ip1", "x17", NULL },
+ { "x18", "pr", NULL },
+ { "x19", NULL },
+ { "x20", NULL },
+ { "x21", NULL },
+ { "x22", NULL },
+ { "x23", NULL },
+ { "x24", NULL },
+ { "x25", NULL },
+ { "x26", NULL },
+ { "x27", NULL },
+ { "x28", NULL },
+ { "fp", "x29", NULL },
+ { "lr", "x30", NULL },
+ { "sp", NULL}
+};
+
+const char* RegisterToken::kWAliases[kNumberOfRegisters][kMaxAliasNumber] = {
+ { "w0", NULL },
+ { "w1", NULL },
+ { "w2", NULL },
+ { "w3", NULL },
+ { "w4", NULL },
+ { "w5", NULL },
+ { "w6", NULL },
+ { "w7", NULL },
+ { "w8", NULL },
+ { "w9", NULL },
+ { "w10", NULL },
+ { "w11", NULL },
+ { "w12", NULL },
+ { "w13", NULL },
+ { "w14", NULL },
+ { "w15", NULL },
+ { "w16", NULL },
+ { "w17", NULL },
+ { "w18", NULL },
+ { "w19", NULL },
+ { "w20", NULL },
+ { "w21", NULL },
+ { "w22", NULL },
+ { "w23", NULL },
+ { "w24", NULL },
+ { "w25", NULL },
+ { "w26", NULL },
+ { "w27", NULL },
+ { "w28", NULL },
+ { "w29", NULL },
+ { "w30", NULL },
+ { "wsp", NULL }
+};
+
+
+Debugger::Debugger(Decoder* decoder, FILE* stream)
+ : Simulator(decoder, stream),
+ debug_parameters_(DBG_INACTIVE),
+ pending_request_(false),
+ steps_(0),
+ last_command_(NULL) {
+ disasm_ = js_new<PrintDisassembler>(stdout);
+ printer_ = js_new<Decoder>();
+ printer_->AppendVisitor(disasm_);
+}
+
+
+Debugger::~Debugger() {
+ js_delete(disasm_);
+ js_delete(printer_);
+}
+
+
+void Debugger::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ if (pending_request()) RunDebuggerShell();
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
+
+
+void Debugger::PrintInstructions(const void* address, int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const Instruction* from = Instruction::CastConst(address);
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * kInstructionSize;
+ }
+ const Instruction* to = from + count * kInstructionSize;
+
+ for (const Instruction* current = from;
+ current < to;
+ current = current->NextInstruction()) {
+ printer_->Decode(current);
+ }
+}
+
+
+void Debugger::PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count) {
+ if (count == 0) {
+ return;
+ }
+
+ const uint8_t* from = address;
+ int size = format->SizeOf();
+ if (count < 0) {
+ count = -count;
+ from -= (count - 1) * size;
+ }
+ const uint8_t* to = from + count * size;
+
+ for (const uint8_t* current = from; current < to; current += size) {
+ if (((current - from) % 8) == 0) {
+ printf("\n%p: ", current);
+ }
+
+ uint64_t data = Memory::Read<uint64_t>(current);
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n\n");
+}
+
+
+void Debugger::PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format) {
+ const uint64_t reg_size = target_reg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = reg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t reg_value = reg<uint64_t>(target_reg.code(),
+ Reg31IsStackPointer);
+ VIXL_ASSERT(count > 0);
+
+ printf("%s = ", name);
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = reg_value >> (reg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
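For reference, the chunking arithmetic used above, worked for a 64-bit register printed with a 32-bit format (count = 2, mask = 0xffffffff); illustrative only and not part of the patch.

    uint64_t reg_value = UINT64_C(0x1122334455667788);
    uint64_t chunk1 = (reg_value >> 32) & 0xffffffff;  // Printed first: 11223344
    uint64_t chunk2 = (reg_value >> 0) & 0xffffffff;   // Printed second: 55667788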
+
+
+// TODO(all): fix this for vector registers.
+void Debugger::PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format) {
+ const unsigned fpreg_size = target_fpreg.size();
+ const uint64_t format_size = format->SizeOf() * 8;
+ const uint64_t count = fpreg_size / format_size;
+ const uint64_t mask = 0xffffffffffffffff >> (64 - format_size);
+ const uint64_t fpreg_value = vreg<uint64_t>(fpreg_size, target_fpreg.code());
+ VIXL_ASSERT(count > 0);
+
+ if (target_fpreg.Is32Bits()) {
+ printf("s%u = ", target_fpreg.code());
+ } else {
+ printf("d%u = ", target_fpreg.code());
+ }
+ for (uint64_t i = 1; i <= count; i++) {
+ uint64_t data = fpreg_value >> (fpreg_size - (i * format_size));
+ data &= mask;
+ format->PrintData(&data);
+ printf(" ");
+ }
+ printf("\n");
+}
+
+
+void Debugger::VisitException(const Instruction* instr) {
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK:
+ DoBreakpoint(instr);
+ return;
+ case HLT:
+ VIXL_FALLTHROUGH();
+ default: Simulator::VisitException(instr);
+ }
+}
+
+
+// Read a command. A command will be at most kMaxDebugShellLine characters
+// long and ends with '\n\0'.
+// TODO: Should this be a utility function?
+char* Debugger::ReadCommandLine(const char* prompt, char* buffer, int length) {
+ int fgets_calls = 0;
+ char* end = NULL;
+
+ printf("%s", prompt);
+ fflush(stdout);
+
+ do {
+ if (fgets(buffer, length, stdin) == NULL) {
+ printf(" ** Error while reading command. **\n");
+ return NULL;
+ }
+
+ fgets_calls++;
+ end = strchr(buffer, '\n');
+ } while (end == NULL);
+
+ if (fgets_calls != 1) {
+ printf(" ** Command too long. **\n");
+ return NULL;
+ }
+
+ // Remove the newline from the end of the command.
+ VIXL_ASSERT(end[1] == '\0');
+ VIXL_ASSERT((end - buffer) < (length - 1));
+ end[0] = '\0';
+
+ return buffer;
+}
+
+
+void Debugger::RunDebuggerShell() {
+ if (IsDebuggerRunning()) {
+ if (steps_ > 0) {
+ // Finish stepping first.
+ --steps_;
+ return;
+ }
+
+ printf("Next: ");
+ PrintInstructions(pc());
+ bool done = false;
+ while (!done) {
+ char buffer[kMaxDebugShellLine];
+ char* line = ReadCommandLine("vixl> ", buffer, kMaxDebugShellLine);
+
+ if (line == NULL) continue; // An error occurred.
+
+ DebugCommand* command = DebugCommand::Parse(line);
+ if (command != NULL) {
+ last_command_ = command;
+ }
+
+ if (last_command_ != NULL) {
+ done = last_command_->Run(this);
+ } else {
+ printf("No previous command to run!\n");
+ }
+ }
+
+ if ((debug_parameters_ & DBG_BREAK) != 0) {
+ // The break request has now been handled, move to next instruction.
+ debug_parameters_ &= ~DBG_BREAK;
+ increment_pc();
+ }
+ }
+}
+
+
+void Debugger::DoBreakpoint(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == BRK);
+
+ printf("Hit breakpoint at pc=%p.\n", reinterpret_cast<const void*>(instr));
+ set_debug_parameters(debug_parameters() | DBG_BREAK | DBG_ACTIVE);
+ // Make the shell point to the brk instruction.
+ set_pc(instr);
+}
+
+
+static bool StringToUInt64(uint64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t parsed = strtoul(line, &endptr, base);
+
+ if (errno == ERANGE) {
+ // Overflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+static bool StringToInt64(int64_t* value, const char* line, int base = 10) {
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ int64_t parsed = strtol(line, &endptr, base);
+
+ if (errno == ERANGE) {
+    // Overflow or underflow.
+ return false;
+ }
+
+ if (endptr == line) {
+ // No digits were parsed.
+ return false;
+ }
+
+ if (*endptr != '\0') {
+ // Non-digit characters present at the end.
+ return false;
+ }
+
+ *value = parsed;
+ return true;
+}
+
+
+Token* Token::Tokenize(const char* arg) {
+ if ((arg == NULL) || (*arg == '\0')) {
+ return NULL;
+ }
+
+  // The order is important. For example, IdentifierToken::Tokenize would
+  // consider any register name to be a valid identifier.
+
+ Token* token = RegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = FPRegisterToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IdentifierToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = AddressToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ token = IntegerToken::Tokenize(arg);
+ if (token != NULL) {
+ return token;
+ }
+
+ return js_new<UnknownToken>(arg);
+}
+
+
+uint8_t* RegisterToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ uint64_t reg_value = debugger->xreg(value().code(), Reg31IsStackPointer);
+ uint8_t* address = NULL;
+ memcpy(&address, &reg_value, sizeof(address));
+ return address;
+}
+
+
+void RegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ fprintf(out, "[Register %s]", Name());
+}
+
+
+const char* RegisterToken::Name() const {
+ if (value().Is32Bits()) {
+ return kWAliases[value().code()][0];
+ } else {
+ return kXAliases[value().code()][0];
+ }
+}
+
+
+Token* RegisterToken::Tokenize(const char* arg) {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ // Is it a X register or alias?
+ for (const char** current = kXAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::XRegFromCode(i));
+ }
+ }
+
+ // Is it a W register or alias?
+ for (const char** current = kWAliases[i]; *current != NULL; current++) {
+ if (strcmp(arg, *current) == 0) {
+ return js_new<RegisterToken>(Register::WRegFromCode(i));
+ }
+ }
+ }
+
+ return NULL;
+}
+
+
+void FPRegisterToken::Print(FILE* out) const {
+ VIXL_ASSERT(value().IsValid());
+ char prefix = value().Is32Bits() ? 's' : 'd';
+ fprintf(out, "[FPRegister %c%" PRIu32 "]", prefix, value().code());
+}
+
+
+Token* FPRegisterToken::Tokenize(const char* arg) {
+ if (strlen(arg) < 2) {
+ return NULL;
+ }
+
+ switch (*arg) {
+ case 's':
+ case 'd':
+ const char* cursor = arg + 1;
+ uint64_t code = 0;
+ if (!StringToUInt64(&code, cursor)) {
+ return NULL;
+ }
+
+    if (code >= kNumberOfFPRegisters) {
+ return NULL;
+ }
+
+ VRegister fpreg = NoVReg;
+ switch (*arg) {
+ case 's':
+ fpreg = VRegister::SRegFromCode(static_cast<unsigned>(code));
+ break;
+ case 'd':
+ fpreg = VRegister::DRegFromCode(static_cast<unsigned>(code));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ return js_new<FPRegisterToken>(fpreg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* IdentifierToken::ToAddress(Debugger* debugger) const {
+ VIXL_ASSERT(CanAddressMemory());
+ const Instruction* pc_value = debugger->pc();
+ uint8_t* address = NULL;
+ memcpy(&address, &pc_value, sizeof(address));
+ return address;
+}
+
+void IdentifierToken::Print(FILE* out) const {
+ fprintf(out, "[Identifier %s]", value());
+}
+
+
+Token* IdentifierToken::Tokenize(const char* arg) {
+ if (!isalpha(arg[0])) {
+ return NULL;
+ }
+
+ const char* cursor = arg + 1;
+ while ((*cursor != '\0') && isalnum(*cursor)) {
+ ++cursor;
+ }
+
+ if (*cursor == '\0') {
+ return js_new<IdentifierToken>(arg);
+ }
+
+ return NULL;
+}
+
+
+uint8_t* AddressToken::ToAddress(Debugger* debugger) const {
+ USE(debugger);
+ return value();
+}
+
+
+void AddressToken::Print(FILE* out) const {
+ fprintf(out, "[Address %p]", value());
+}
+
+
+Token* AddressToken::Tokenize(const char* arg) {
+ if ((strlen(arg) < 3) || (arg[0] != '0') || (arg[1] != 'x')) {
+ return NULL;
+ }
+
+ uint64_t ptr = 0;
+ if (!StringToUInt64(&ptr, arg, 16)) {
+ return NULL;
+ }
+
+ uint8_t* address = reinterpret_cast<uint8_t*>(ptr);
+ return js_new<AddressToken>(address);
+}
+
+
+void IntegerToken::Print(FILE* out) const {
+ fprintf(out, "[Integer %" PRId64 "]", value());
+}
+
+
+Token* IntegerToken::Tokenize(const char* arg) {
+ int64_t value = 0;
+ if (!StringToInt64(&value, arg)) {
+ return NULL;
+ }
+
+ return js_new<IntegerToken>(value);
+}
+
+
+Token* FormatToken::Tokenize(const char* arg) {
+ size_t length = strlen(arg);
+ switch (arg[0]) {
+ case 'x':
+ case 's':
+ case 'u':
+ case 'f':
+ if (length == 1) return NULL;
+ break;
+ case 'i':
+ if (length == 1) return js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ VIXL_FALLTHROUGH();
+ default: return NULL;
+ }
+
+ char* endptr = NULL;
+ errno = 0; // Reset errors.
+ uint64_t count = strtoul(arg + 1, &endptr, 10);
+
+ if (errno != 0) {
+ // Overflow, etc.
+ return NULL;
+ }
+
+ if (endptr == arg) {
+ // No digits were parsed.
+ return NULL;
+ }
+
+ if (*endptr != '\0') {
+ // There are unexpected (non-digit) characters after the number.
+ return NULL;
+ }
+
+ switch (arg[0]) {
+ case 'x':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%02" PRIx8, 'x');
+ case 16: return js_new<Format<uint16_t>>("%04" PRIx16, 'x');
+ case 32: return js_new<Format<uint32_t>>("%08" PRIx32, 'x');
+ case 64: return js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ default: return NULL;
+ }
+ case 's':
+ switch (count) {
+ case 8: return js_new<Format<int8_t>>("%4" PRId8, 's');
+ case 16: return js_new<Format<int16_t>>("%6" PRId16, 's');
+ case 32: return js_new<Format<int32_t>>("%11" PRId32, 's');
+ case 64: return js_new<Format<int64_t>>("%20" PRId64, 's');
+ default: return NULL;
+ }
+ case 'u':
+ switch (count) {
+ case 8: return js_new<Format<uint8_t>>("%3" PRIu8, 'u');
+ case 16: return js_new<Format<uint16_t>>("%5" PRIu16, 'u');
+ case 32: return js_new<Format<uint32_t>>("%10" PRIu32, 'u');
+ case 64: return js_new<Format<uint64_t>>("%20" PRIu64, 'u');
+ default: return NULL;
+ }
+ case 'f':
+ switch (count) {
+ case 32: return js_new<Format<float>>("%13g", 'f');
+ case 64: return js_new<Format<double>>("%13g", 'f');
+ default: return NULL;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ return NULL;
+ }
+}
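A quick sketch of what the tokenizer above produces (illustrative only, not part of the patch, and written as if inside this translation unit); this is the same parsing used for the `.format` suffix of the shell's print and examine commands.

    Token* tok = FormatToken::Tokenize("x64");
    VIXL_ASSERT((tok != NULL) && tok->IsFormat());
    FormatToken* fmt = FormatToken::Cast(tok);
    VIXL_ASSERT(fmt->SizeOf() == 8);       // 64-bit chunks.
    VIXL_ASSERT(fmt->type_code() == 'x');  // Printed as hexadecimal.
    uint64_t data = UINT64_C(0x0123456789abcdef);
    fmt->PrintData(&data);                 // Prints "0123456789abcdef".
    js_delete(fmt);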
+
+
+template<typename T>
+void Format<T>::Print(FILE* out) const {
+ unsigned size = sizeof(T) * 8;
+ fprintf(out, "[Format %c%u - %s]", type_code_, size, fmt_);
+}
+
+
+void UnknownToken::Print(FILE* out) const {
+ fprintf(out, "[Unknown %s]", unknown_);
+}
+
+
+void DebugCommand::Print(FILE* out) {
+ fprintf(out, "%s", name());
+}
+
+
+bool DebugCommand::Match(const char* name, const char** aliases) {
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (strcmp(name, *current) == 0) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+DebugCommand* DebugCommand::Parse(char* line) {
+ TokenVector args;
+
+ for (char* chunk = strtok(line, " \t");
+ chunk != NULL;
+ chunk = strtok(NULL, " \t")) {
+ char* dot = strchr(chunk, '.');
+ if (dot != NULL) {
+ // 'Token.format'.
+ Token* format = FormatToken::Tokenize(dot + 1);
+ if (format != NULL) {
+ *dot = '\0';
+ (void)args.append(Token::Tokenize(chunk));
+ (void)args.append(format);
+ } else {
+ // Error while parsing the format, push the UnknownToken so an error
+ // can be accurately reported.
+ (void)args.append(Token::Tokenize(chunk));
+ }
+ } else {
+ (void)args.append(Token::Tokenize(chunk));
+ }
+ }
+
+ if (args.empty()) {
+ return NULL;
+ }
+
+ if (!args[0]->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 0, "command name is not valid");
+ }
+
+ const char* name = IdentifierToken::Cast(args[0])->value();
+ #define RETURN_IF_MATCH(Command) \
+ if (Match(name, Command::kAliases)) { \
+ return Command::Build(std::move(args)); \
+ }
+ DEBUG_COMMAND_LIST(RETURN_IF_MATCH);
+ #undef RETURN_IF_MATCH
+
+ return js_new<UnknownCommand>(std::move(args));
+}
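End-to-end, a shell line such as `x sp.x64 16` is tokenised into an identifier (`x`), a register (`sp`), a format (`.x64`) and an integer (`16`), then dispatched to ExamineCommand::Build via the alias table. A sketch (not part of the patch, written as if inside this file); note the buffer must be writable because Parse() tokenises in place with strtok().

    char line[] = "x sp.x64 16";
    DebugCommand* cmd = DebugCommand::Parse(line);
    // 'x' matches ExamineCommand::kAliases, so `cmd` examines memory: it will
    // dump 16 64-bit words, in hexadecimal, starting at the stack pointer.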
+
+
+void DebugCommand::PrintHelp(const char** aliases,
+ const char* args,
+ const char* help) {
+ VIXL_ASSERT(aliases[0] != NULL);
+ VIXL_ASSERT(help != NULL);
+
+ printf("\n----\n\n");
+ for (const char** current = aliases; *current != NULL; current++) {
+ if (args != NULL) {
+ printf("%s %s\n", *current, args);
+ } else {
+ printf("%s\n", *current);
+ }
+ }
+ printf("\n%s\n", help);
+}
+
+
+bool HelpCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ #define PRINT_HELP(Command) \
+ DebugCommand::PrintHelp(Command::kAliases, \
+ Command::kArguments, \
+ Command::kHelp);
+ DEBUG_COMMAND_LIST(PRINT_HELP);
+ #undef PRINT_HELP
+ printf("\n----\n\n");
+
+ return false;
+}
+
+
+DebugCommand* HelpCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<HelpCommand>(args[0]);
+}
+
+
+bool ContinueCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ debugger->set_debug_parameters(debugger->debug_parameters() & ~DBG_ACTIVE);
+ return true;
+}
+
+
+DebugCommand* ContinueCommand::Build(TokenVector&& args) {
+ if (args.length() != 1) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<ContinueCommand>(args[0]);
+}
+
+
+bool StepCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ int64_t steps = count();
+ if (steps < 0) {
+ printf(" ** invalid value for steps: %" PRId64 " (<0) **\n", steps);
+ } else if (steps > 1) {
+ debugger->set_steps(steps - 1);
+ }
+
+ return true;
+}
+
+
+void StepCommand::Print(FILE* out) {
+ fprintf(out, "%s %" PRId64 "", name(), count());
+}
+
+
+DebugCommand* StepCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // step [1]
+ count = js_new<IntegerToken>(1);
+ break;
+ }
+ case 2: { // step n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects int");
+ }
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<StepCommand>(args[0], count);
+}
+
+
+DebugCommand* DisasmCommand::Build(TokenVector&& args) {
+ IntegerToken* count = NULL;
+ switch (args.length()) {
+ case 1: { // disasm [10]
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 2: { // disasm n
+ Token* first = args[1];
+ if (!first->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects int");
+ }
+
+ count = IntegerToken::Cast(first);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ Token* target = js_new<IdentifierToken>("pc");
+ FormatToken* format = js_new<Format<uint32_t>>("%08" PRIx32, 'i');
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+void PrintCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ target()->Print(out);
+ if (format() != NULL) format()->Print(out);
+}
+
+
+bool PrintCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ Token* tok = target();
+ if (tok->IsIdentifier()) {
+ char* identifier = IdentifierToken::Cast(tok)->value();
+ if (strcmp(identifier, "regs") == 0) {
+ debugger->PrintRegisters();
+ } else if (strcmp(identifier, "fpregs") == 0) {
+ debugger->PrintVRegisters();
+ } else if (strcmp(identifier, "sysregs") == 0) {
+ debugger->PrintSystemRegisters();
+ } else if (strcmp(identifier, "pc") == 0) {
+ printf("pc = %16p\n", reinterpret_cast<const void*>(debugger->pc()));
+ } else {
+ printf(" ** Unknown identifier to print: %s **\n", identifier);
+ }
+
+ return false;
+ }
+
+ FormatToken* format_tok = format();
+ VIXL_ASSERT(format_tok != NULL);
+ if (format_tok->type_code() == 'i') {
+ // TODO(all): Add support for instruction disassembly.
+ printf(" ** unsupported format: instructions **\n");
+ return false;
+ }
+
+ if (tok->IsRegister()) {
+ RegisterToken* reg_tok = RegisterToken::Cast(tok);
+ Register reg = reg_tok->value();
+ debugger->PrintRegister(reg, reg_tok->Name(), format_tok);
+ return false;
+ }
+
+ if (tok->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(tok)->value();
+ debugger->PrintFPRegister(fpreg, format_tok);
+ return false;
+ }
+
+ VIXL_UNREACHABLE();
+ return false;
+}
+
+
+DebugCommand* PrintCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->IsRegister() &&
+ !target->IsFPRegister() &&
+ !target->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects reg or identifier");
+ }
+
+ FormatToken* format = NULL;
+ int target_size = 0;
+ if (target->IsRegister()) {
+ Register reg = RegisterToken::Cast(target)->value();
+ target_size = reg.SizeInBytes();
+ } else if (target->IsFPRegister()) {
+ FPRegister fpreg = FPRegisterToken::Cast(target)->value();
+ target_size = fpreg.SizeInBytes();
+ }
+ // If the target is an identifier there must be no format. This is checked
+ // in the switch statement below.
+
+ switch (args.length()) {
+ case 2: {
+ if (target->IsRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<uint32_t>>("%08" PRIx32, 'x'); break;
+ case 8: format = js_new<Format<uint64_t>>("%016" PRIx64, 'x'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ } else if (target->IsFPRegister()) {
+ switch (target_size) {
+ case 4: format = js_new<Format<float>>("%8g", 'f'); break;
+ case 8: format = js_new<Format<double>>("%8g", 'f'); break;
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ break;
+ }
+ case 3: {
+ if (target->IsIdentifier()) {
+ return js_new<InvalidCommand>(std::move(args), 2,
+ "format is only allowed with registers");
+ }
+
+ Token* second = args[2];
+ if (!second->IsFormat()) {
+ return js_new<InvalidCommand>(std::move(args), 2, "expects format");
+ }
+ format = FormatToken::Cast(second);
+
+ if (format->SizeOf() > target_size) {
+ return js_new<InvalidCommand>(std::move(args), 2, "format too wide");
+ }
+
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<PrintCommand>(args[0], target, format);
+}
+
+
+bool ExamineCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+
+ uint8_t* address = target()->ToAddress(debugger);
+ int64_t amount = count()->value();
+ if (format()->type_code() == 'i') {
+ debugger->PrintInstructions(address, amount);
+ } else {
+ debugger->PrintMemory(address, format(), amount);
+ }
+
+ return false;
+}
+
+
+void ExamineCommand::Print(FILE* out) {
+ fprintf(out, "%s ", name());
+ format()->Print(out);
+ target()->Print(out);
+}
+
+
+DebugCommand* ExamineCommand::Build(TokenVector&& args) {
+ if (args.length() < 2) {
+ return js_new<InvalidCommand>(std::move(args), -1, "too few arguments");
+ }
+
+ Token* target = args[1];
+ if (!target->CanAddressMemory()) {
+ return js_new<InvalidCommand>(std::move(args), 1, "expects address");
+ }
+
+ FormatToken* format = NULL;
+ IntegerToken* count = NULL;
+
+ switch (args.length()) {
+ case 2: { // mem addr[.x64] [10]
+ format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+ count = js_new<IntegerToken>(10);
+ break;
+ }
+ case 3: { // mem addr.format [10]
+ // mem addr[.x64] n
+ Token* second = args[2];
+ if (second->IsFormat()) {
+ format = FormatToken::Cast(second);
+ count = js_new<IntegerToken>(10);
+ break;
+ } else if (second->IsInteger()) {
+ format = js_new<Format<uint64_t>>("%016" PRIx64, 'x');
+        count = IntegerToken::Cast(second);
+        break;
+ } else {
+ return js_new<InvalidCommand>(std::move(args), 2, "expects format or integer");
+ }
+ VIXL_UNREACHABLE();
+ break;
+ }
+ case 4: { // mem addr.format n
+ Token* second = args[2];
+ Token* third = args[3];
+ if (!second->IsFormat() || !third->IsInteger()) {
+ return js_new<InvalidCommand>(std::move(args), -1, "expects addr[.format] [n]");
+ }
+ format = FormatToken::Cast(second);
+ count = IntegerToken::Cast(third);
+ break;
+ }
+ default:
+ return js_new<InvalidCommand>(std::move(args), -1, "too many arguments");
+ }
+
+ return js_new<ExamineCommand>(args[0], target, format, count);
+}
+
+
+UnknownCommand::~UnknownCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool UnknownCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Unknown Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ args_[i]->Print(stdout);
+ }
+ printf(" **\n");
+
+ return false;
+}
+
+
+InvalidCommand::~InvalidCommand() {
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ js_delete(args_[i]);
+ }
+}
+
+
+bool InvalidCommand::Run(Debugger* debugger) {
+ VIXL_ASSERT(debugger->IsDebuggerRunning());
+ USE(debugger);
+
+ printf(" ** Invalid Command:");
+ const size_t size = args_.length();
+ for (size_t i = 0; i < size; ++i) {
+ printf(" ");
+ if (i == static_cast<size_t>(index_)) {
+ printf(">>");
+ args_[i]->Print(stdout);
+ printf("<<");
+ } else {
+ args_[i]->Print(stdout);
+ }
+ }
+ printf(" **\n");
+ printf(" ** %s\n", cause_);
+
+ return false;
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Debugger-vixl.h b/js/src/jit/arm64/vixl/Debugger-vixl.h
new file mode 100644
index 0000000000..7236bf1e5e
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Debugger-vixl.h
@@ -0,0 +1,117 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#ifndef VIXL_A64_DEBUGGER_A64_H_
+#define VIXL_A64_DEBUGGER_A64_H_
+
+#include <ctype.h>
+#include <errno.h>
+#include <limits.h>
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+// Flags that represent the debugger state.
+enum DebugParameters {
+ DBG_INACTIVE = 0,
+ DBG_ACTIVE = 1 << 0, // The debugger is active.
+ DBG_BREAK = 1 << 1 // The debugger is at a breakpoint.
+};
+
+// Forward declarations.
+class DebugCommand;
+class Token;
+class FormatToken;
+
+class Debugger : public Simulator {
+ public:
+ explicit Debugger(Decoder* decoder, FILE* stream = stdout);
+ ~Debugger();
+
+ virtual void Run() override;
+ virtual void VisitException(const Instruction* instr) override;
+
+ int debug_parameters() const { return debug_parameters_; }
+ void set_debug_parameters(int parameters) {
+ debug_parameters_ = parameters;
+
+ update_pending_request();
+ }
+
+  // Number of instructions to execute before the debugger shell is given
+  // back control.
+ int64_t steps() const { return steps_; }
+ void set_steps(int64_t value) {
+ VIXL_ASSERT(value > 1);
+ steps_ = value;
+ }
+
+ bool IsDebuggerRunning() const {
+ return (debug_parameters_ & DBG_ACTIVE) != 0;
+ }
+
+ bool pending_request() const { return pending_request_; }
+ void update_pending_request() {
+ pending_request_ = IsDebuggerRunning();
+ }
+
+ void PrintInstructions(const void* address, int64_t count = 1);
+ void PrintMemory(const uint8_t* address,
+ const FormatToken* format,
+ int64_t count = 1);
+ void PrintRegister(const Register& target_reg,
+ const char* name,
+ const FormatToken* format);
+ void PrintFPRegister(const FPRegister& target_fpreg,
+ const FormatToken* format);
+
+ private:
+ char* ReadCommandLine(const char* prompt, char* buffer, int length);
+ void RunDebuggerShell();
+ void DoBreakpoint(const Instruction* instr);
+
+ int debug_parameters_;
+ bool pending_request_;
+ int64_t steps_;
+ DebugCommand* last_command_;
+ PrintDisassembler* disasm_;
+ Decoder* printer_;
+
+  // Length of the longest command line accepted by the debugger shell.
+ static const int kMaxDebugShellLine = 256;
+};
+
+} // namespace vixl
+
+#endif // VIXL_A64_DEBUGGER_A64_H_
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.cpp b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
new file mode 100644
index 0000000000..a306e66b3c
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.cpp
@@ -0,0 +1,878 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Decoder-vixl.h"
+
+#include <algorithm>
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+void Decoder::DecodeInstruction(const Instruction *instr) {
+ if (instr->Bits(28, 27) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(27, 24)) {
+ // 0: PC relative addressing.
+ case 0x0: DecodePCRelAddressing(instr); break;
+
+ // 1: Add/sub immediate.
+ case 0x1: DecodeAddSubImmediate(instr); break;
+
+ // A: Logical shifted register.
+ // Add/sub with carry.
+ // Conditional compare register.
+ // Conditional compare immediate.
+ // Conditional select.
+ // Data processing 1 source.
+ // Data processing 2 source.
+ // B: Add/sub shifted register.
+ // Add/sub extended register.
+ // Data processing 3 source.
+ case 0xA:
+ case 0xB: DecodeDataProcessing(instr); break;
+
+ // 2: Logical immediate.
+ // Move wide immediate.
+ case 0x2: DecodeLogical(instr); break;
+
+ // 3: Bitfield.
+ // Extract.
+ case 0x3: DecodeBitfieldExtract(instr); break;
+
+ // 4: Unconditional branch immediate.
+ // Exception generation.
+ // Compare and branch immediate.
+ // 5: Compare and branch immediate.
+ // Conditional branch.
+ // System.
+ // 6,7: Unconditional branch.
+ // Test and branch immediate.
+ case 0x4:
+ case 0x5:
+ case 0x6:
+ case 0x7: DecodeBranchSystemException(instr); break;
+
+ // 8,9: Load/store register pair post-index.
+ // Load register literal.
+ // Load/store register unscaled immediate.
+ // Load/store register immediate post-index.
+ // Load/store register immediate pre-index.
+ // Load/store register offset.
+ // Load/store exclusive.
+ // C,D: Load/store register pair offset.
+ // Load/store register pair pre-index.
+ // Load/store register unsigned immediate.
+ // Advanced SIMD.
+ case 0x8:
+ case 0x9:
+ case 0xC:
+ case 0xD: DecodeLoadStore(instr); break;
+
+ // E: FP fixed point conversion.
+ // FP integer conversion.
+ // FP data processing 1 source.
+ // FP compare.
+ // FP immediate.
+ // FP data processing 2 source.
+ // FP conditional compare.
+ // FP conditional select.
+ // Advanced SIMD.
+ // F: FP data processing 3 source.
+ // Advanced SIMD.
+ case 0xE:
+ case 0xF: DecodeFP(instr); break;
+ }
+ }
+}
+
+void Decoder::AppendVisitor(DecoderVisitor* new_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::PrependVisitor(DecoderVisitor* new_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.insert(visitors_.begin(), new_visitor));
+}
+
+
+void Decoder::InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ MOZ_ALWAYS_TRUE(visitors_.insert(it, new_visitor));
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor) {
+ for (auto it = visitors_.begin(); it != visitors_.end(); it++) {
+ if (*it == registered_visitor) {
+ it++;
+ MOZ_ALWAYS_TRUE(visitors_.insert(it, new_visitor));
+ return;
+ }
+ }
+ // We reached the end of the list without finding registered_visitor.
+ MOZ_ALWAYS_TRUE(visitors_.append(new_visitor));
+}
+
+
+void Decoder::RemoveVisitor(DecoderVisitor* visitor) {
+ visitors_.erase(std::remove(visitors_.begin(), visitors_.end(), visitor),
+ visitors_.end());
+}
+
+
+void Decoder::DecodePCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x0);
+ // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
+ // decode.
+ VIXL_ASSERT(instr->Bit(28) == 0x1);
+ VisitPCRelAddressing(instr);
+}
+
+
+void Decoder::DecodeBranchSystemException(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x4) ||
+ (instr->Bits(27, 24) == 0x5) ||
+ (instr->Bits(27, 24) == 0x6) ||
+ (instr->Bits(27, 24) == 0x7) );
+
+ switch (instr->Bits(31, 29)) {
+ case 0:
+ case 4: {
+ VisitUnconditionalBranch(instr);
+ break;
+ }
+ case 1:
+ case 5: {
+ if (instr->Bit(25) == 0) {
+ VisitCompareBranch(instr);
+ } else {
+ VisitTestBranch(instr);
+ }
+ break;
+ }
+ case 2: {
+ if (instr->Bit(25) == 0) {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Mask(0x01000010) == 0x00000010)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalBranch(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(25) == 0) {
+ if (instr->Bit(24) == 0) {
+ if ((instr->Bits(4, 2) != 0) ||
+ (instr->Mask(0x00E0001D) == 0x00200001) ||
+ (instr->Mask(0x00E0001D) == 0x00400001) ||
+ (instr->Mask(0x00E0001E) == 0x00200002) ||
+ (instr->Mask(0x00E0001E) == 0x00400002) ||
+ (instr->Mask(0x00E0001C) == 0x00600000) ||
+ (instr->Mask(0x00E0001C) == 0x00800000) ||
+ (instr->Mask(0x00E0001F) == 0x00A00000) ||
+ (instr->Mask(0x00C0001C) == 0x00C00000)) {
+ if (instr->InstructionBits() == UNDEFINED_INST_PATTERN) {
+ VisitException(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitException(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ const Instr masked_003FF0E0 = instr->Mask(0x003FF0E0);
+ if ((instr->Bits(21, 19) == 0x4) ||
+ (masked_003FF0E0 == 0x00033000) ||
+ (masked_003FF0E0 == 0x003FF020) ||
+ (masked_003FF0E0 == 0x003FF060) ||
+ (masked_003FF0E0 == 0x003FF0E0) ||
+ (instr->Mask(0x00388000) == 0x00008000) ||
+ (instr->Mask(0x0038E000) == 0x00000000) ||
+ (instr->Mask(0x0039E000) == 0x00002000) ||
+ (instr->Mask(0x003AE000) == 0x00002000) ||
+ (instr->Mask(0x003CE000) == 0x00042000) ||
+ (instr->Mask(0x003FFFC0) == 0x000320C0) ||
+ (instr->Mask(0x003FF100) == 0x00032100) ||
+ // (instr->Mask(0x003FF200) == 0x00032200) || // match CSDB
+ (instr->Mask(0x003FF400) == 0x00032400) ||
+ (instr->Mask(0x003FF800) == 0x00032800) ||
+ (instr->Mask(0x0038F000) == 0x00005000) ||
+ (instr->Mask(0x0038E000) == 0x00006000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitSystem(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(24) == 0x1) ||
+ (instr->Bits(20, 16) != 0x1F) ||
+ (instr->Bits(15, 10) != 0) ||
+ (instr->Bits(4, 0) != 0) ||
+ (instr->Bits(24, 21) == 0x3) ||
+ (instr->Bits(24, 22) == 0x3)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitUnconditionalBranchToRegister(instr);
+ }
+ }
+ break;
+ }
+ case 3:
+ case 7: {
+ VisitUnallocated(instr);
+ break;
+ }
+ }
+}
+
+
+void Decoder::DecodeLoadStore(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0x8) ||
+ (instr->Bits(27, 24) == 0x9) ||
+ (instr->Bits(27, 24) == 0xC) ||
+ (instr->Bits(27, 24) == 0xD) );
+ // TODO(all): rearrange the tree to integrate this branch.
+ if ((instr->Bit(28) == 0) && (instr->Bit(29) == 0) && (instr->Bit(26) == 1)) {
+ DecodeNEONLoadStore(instr);
+ return;
+ }
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(26) == 0) {
+ VisitLoadStoreExclusive(instr);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Mask(0xC4400000) == 0xC0400000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePairNonTemporal(instr);
+ }
+ } else {
+ VisitLoadStorePairPostIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Mask(0xC4000000) == 0xC4000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadLiteral(instr);
+ }
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(21) == 0) {
+ switch (instr->Bits(11, 10)) {
+ case 0: {
+ VisitLoadStoreUnscaledOffset(instr);
+ break;
+ }
+ case 1: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePostIndex(instr);
+ }
+ break;
+ }
+ case 2: {
+ // TODO: VisitLoadStoreRegisterOffsetUnpriv.
+ VisitUnimplemented(instr);
+ break;
+ }
+ case 3: {
+ if (instr->Mask(0xC4C00000) == 0xC0800000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStorePreIndex(instr);
+ }
+ break;
+ }
+ }
+ } else {
+ if (instr->Bits(11, 10) == 0x2) {
+ if (instr->Bit(14) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreRegisterOffset(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Bits(31, 30) == 0x3) ||
+ (instr->Mask(0xC4400000) == 0x40000000)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLoadStorePairOffset(instr);
+ } else {
+ VisitLoadStorePairPreIndex(instr);
+ }
+ }
+ }
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitUnallocated(instr);
+ } else {
+ if ((instr->Mask(0x84C00000) == 0x80C00000) ||
+ (instr->Mask(0x44800000) == 0x44800000) ||
+ (instr->Mask(0x84800000) == 0x84800000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLoadStoreUnsignedOffset(instr);
+ }
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeLogical(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x2);
+
+ if (instr->Mask(0x80400000) == 0x00400000) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitLogicalImmediate(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitMoveWideImmediate(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeBitfieldExtract(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x3);
+
+ if ((instr->Mask(0x80400000) == 0x80000000) ||
+ (instr->Mask(0x80400000) == 0x00400000) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else if (instr->Bit(23) == 0) {
+ if ((instr->Mask(0x80200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) == 0x60000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitBitfield(instr);
+ }
+ } else {
+ if ((instr->Mask(0x60200000) == 0x00200000) ||
+ (instr->Mask(0x60000000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitExtract(instr);
+ }
+ }
+}
+
+
+void Decoder::DecodeAddSubImmediate(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(27, 24) == 0x1);
+ if (instr->Bit(23) == 1) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubImmediate(instr);
+ }
+}
+
+
+void Decoder::DecodeDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xA) ||
+ (instr->Bits(27, 24) == 0xB));
+
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(28) == 0) {
+ if (instr->Mask(0x80008000) == 0x00008000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitLogicalShifted(instr);
+ }
+ } else {
+ switch (instr->Bits(23, 21)) {
+ case 0: {
+ if (instr->Mask(0x0000FC00) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubWithCarry(instr);
+ }
+ break;
+ }
+ case 2: {
+ if ((instr->Bit(29) == 0) ||
+ (instr->Mask(0x00000410) != 0)) {
+ VisitUnallocated(instr);
+ } else {
+ if (instr->Bit(11) == 0) {
+ VisitConditionalCompareRegister(instr);
+ } else {
+ VisitConditionalCompareImmediate(instr);
+ }
+ }
+ break;
+ }
+ case 4: {
+ if (instr->Mask(0x20000800) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ VisitConditionalSelect(instr);
+ }
+ break;
+ }
+ case 6: {
+ if (instr->Bit(29) == 0x1) {
+ VisitUnallocated(instr);
+ VIXL_FALLTHROUGH();
+ } else {
+ if (instr->Bit(30) == 0) {
+ if ((instr->Bit(15) == 0x1) ||
+ (instr->Bits(15, 11) == 0) ||
+ (instr->Bits(15, 12) == 0x1) ||
+ (instr->Bits(15, 12) == 0x3) ||
+ (instr->Bits(15, 13) == 0x3) ||
+ (instr->Mask(0x8000EC00) == 0x00004C00) ||
+ (instr->Mask(0x8000E800) == 0x80004000) ||
+ (instr->Mask(0x8000E400) == 0x80004000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing2Source(instr);
+ }
+ } else {
+ if ((instr->Bit(13) == 1) ||
+ (instr->Bits(20, 16) != 0) ||
+ (instr->Bits(15, 14) != 0) ||
+ (instr->Mask(0xA01FFC00) == 0x00000C00) ||
+ (instr->Mask(0x201FF800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing1Source(instr);
+ }
+ }
+ break;
+ }
+ }
+ case 1:
+ case 3:
+ case 5:
+ case 7: VisitUnallocated(instr); break;
+ }
+ }
+ } else {
+ if (instr->Bit(28) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x80008000) == 0x00008000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubShifted(instr);
+ }
+ } else {
+ if ((instr->Mask(0x00C00000) != 0x00000000) ||
+ (instr->Mask(0x00001400) == 0x00001400) ||
+ (instr->Mask(0x00001800) == 0x00001800)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitAddSubExtended(instr);
+ }
+ }
+ } else {
+ if ((instr->Bit(30) == 0x1) ||
+ (instr->Bits(30, 29) == 0x1) ||
+ (instr->Mask(0xE0600000) == 0x00200000) ||
+ (instr->Mask(0xE0608000) == 0x00400000) ||
+ (instr->Mask(0x60608000) == 0x00408000) ||
+ (instr->Mask(0x60E00000) == 0x00E00000) ||
+ (instr->Mask(0x60E00000) == 0x00800000) ||
+ (instr->Mask(0x60E00000) == 0x00600000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitDataProcessing3Source(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeFP(const Instruction* instr) {
+ VIXL_ASSERT((instr->Bits(27, 24) == 0xE) ||
+ (instr->Bits(27, 24) == 0xF));
+ if (instr->Bit(28) == 0) {
+ DecodeNEONVectorDataProcessing(instr);
+ } else {
+ if (instr->Bits(31, 30) == 0x3) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(31, 30) == 0x1) {
+ DecodeNEONScalarDataProcessing(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if ((instr->Bit(23) == 1) ||
+ (instr->Bit(18) == 1) ||
+ (instr->Mask(0x80008000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x00000000) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x00160000) == 0x00000000) ||
+ (instr->Mask(0x00160000) == 0x00120000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPFixedPointConvert(instr);
+ }
+ } else {
+ if (instr->Bits(15, 10) == 32) {
+ VisitUnallocated(instr);
+ } else if (instr->Bits(15, 10) == 0) {
+ if ((instr->Bits(23, 22) == 0x3) ||
+ (instr->Mask(0x000E0000) == 0x000A0000) ||
+ (instr->Mask(0x000E0000) == 0x000C0000) ||
+ (instr->Mask(0x00160000) == 0x00120000) ||
+ (instr->Mask(0x00160000) == 0x00140000) ||
+ (instr->Mask(0x20C40000) == 0x00800000) ||
+ (instr->Mask(0x20C60000) == 0x00840000) ||
+ (instr->Mask(0xA0C60000) == 0x80060000) ||
+ (instr->Mask(0xA0C60000) == 0x00860000) ||
+ (instr->Mask(0xA0C60000) == 0x00460000) ||
+ (instr->Mask(0xA0CE0000) == 0x80860000) ||
+ (instr->Mask(0xA0CE0000) == 0x804E0000) ||
+ (instr->Mask(0xA0CE0000) == 0x000E0000) ||
+ (instr->Mask(0xA0D60000) == 0x00160000) ||
+ (instr->Mask(0xA0D60000) == 0x80560000) ||
+ (instr->Mask(0xA0D60000) == 0x80960000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPIntegerConvert(instr);
+ }
+ } else if (instr->Bits(14, 10) == 16) {
+ const Instr masked_A0DF8000 = instr->Mask(0xA0DF8000);
+ if ((instr->Mask(0x80180000) != 0) ||
+ (masked_A0DF8000 == 0x00020000) ||
+ (masked_A0DF8000 == 0x00030000) ||
+ (masked_A0DF8000 == 0x00068000) ||
+ (masked_A0DF8000 == 0x00428000) ||
+ (masked_A0DF8000 == 0x00430000) ||
+ (masked_A0DF8000 == 0x00468000) ||
+ (instr->Mask(0xA0D80000) == 0x00800000) ||
+ (instr->Mask(0xA0DE0000) == 0x00C00000) ||
+ (instr->Mask(0xA0DF0000) == 0x00C30000) ||
+ (instr->Mask(0xA0DC0000) == 0x00C40000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing1Source(instr);
+ }
+ } else if (instr->Bits(13, 10) == 8) {
+ if ((instr->Bits(15, 14) != 0) ||
+ (instr->Bits(2, 0) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPCompare(instr);
+ }
+ } else if (instr->Bits(12, 10) == 4) {
+ if ((instr->Bits(9, 5) != 0) ||
+ (instr->Mask(0x80800000) != 0x00000000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPImmediate(instr);
+ }
+ } else {
+ if (instr->Mask(0x80800000) != 0x00000000) {
+ VisitUnallocated(instr);
+ } else {
+ switch (instr->Bits(11, 10)) {
+ case 1: {
+ VisitFPConditionalCompare(instr);
+ break;
+ }
+ case 2: {
+ if ((instr->Bits(15, 14) == 0x3) ||
+ (instr->Mask(0x00009000) == 0x00009000) ||
+ (instr->Mask(0x0000A000) == 0x0000A000)) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing2Source(instr);
+ }
+ break;
+ }
+ case 3: {
+ VisitFPConditionalSelect(instr);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ }
+ }
+ }
+ } else {
+ // Bit 30 == 1 has been handled earlier.
+ VIXL_ASSERT(instr->Bit(30) == 0);
+ if (instr->Mask(0xA0800000) != 0) {
+ VisitUnallocated(instr);
+ } else {
+ VisitFPDataProcessing3Source(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
+void Decoder::DecodeNEONLoadStore(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(29, 25) == 0x6);
+ if (instr->Bit(31) == 0) {
+ if ((instr->Bit(24) == 0) && (instr->Bit(21) == 1)) {
+ VisitUnallocated(instr);
+ return;
+ }
+
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(20, 16) == 0) {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStruct(instr);
+ } else {
+ VisitNEONLoadStoreSingleStruct(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(24) == 0) {
+ VisitNEONLoadStoreMultiStructPostIndex(instr);
+ } else {
+ VisitNEONLoadStoreSingleStructPostIndex(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONVectorDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0x7);
+ if (instr->Bit(31) == 0) {
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONTable(instr);
+ } else {
+ VisitNEONPerm(instr);
+ }
+ } else {
+ VisitNEONExtract(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEON3Different(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEON2RegMisc(instr);
+ } else {
+ if (instr->Bits(30, 29) == 0x2) {
+ VisitCryptoAES(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONAcrossLanes(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEON3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ if (instr->Bits(22, 19) == 0) {
+ VisitNEONModifiedImmediate(instr);
+ } else {
+ VisitNEONShiftImmediate(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+}
+
+
+void Decoder::DecodeNEONScalarDataProcessing(const Instruction* instr) {
+ VIXL_ASSERT(instr->Bits(28, 25) == 0xF);
+ if (instr->Bit(24) == 0) {
+ if (instr->Bit(21) == 0) {
+ if (instr->Bit(15) == 0) {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(29) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitCrypto3RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bits(23, 22) == 0) {
+ VisitNEONScalarCopy(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ if (instr->Bit(11) == 0) {
+ VisitNEONScalar3Diff(instr);
+ } else {
+ if (instr->Bits(18, 17) == 0) {
+ if (instr->Bit(20) == 0) {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalar2RegMisc(instr);
+ } else {
+ if (instr->Bit(29) == 0) {
+ VisitCrypto2RegSHA(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(19) == 0) {
+ VisitNEONScalarPairwise(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ } else {
+ VisitNEONScalar3Same(instr);
+ }
+ }
+ } else {
+ if (instr->Bit(10) == 0) {
+ VisitNEONScalarByIndexedElement(instr);
+ } else {
+ if (instr->Bit(23) == 0) {
+ VisitNEONScalarShiftImmediate(instr);
+ } else {
+ VisitUnallocated(instr);
+ }
+ }
+ }
+}
+
+
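+// Expand one Decoder::Visit<Name> method per VISITOR_LIST entry: each method
+// asserts that the instruction matches the fixed bits of its class and then
+// forwards it to every registered DecoderVisitor, in registration order.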
+#define DEFINE_VISITOR_CALLERS(A) \
+ void Decoder::Visit##A(const Instruction *instr) { \
+ VIXL_ASSERT(instr->Mask(A##FMask) == A##Fixed); \
+ for (auto visitor : visitors_) { \
+ visitor->Visit##A(instr); \
+ } \
+ }
+VISITOR_LIST(DEFINE_VISITOR_CALLERS)
+#undef DEFINE_VISITOR_CALLERS
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Decoder-vixl.h b/js/src/jit/arm64/vixl/Decoder-vixl.h
new file mode 100644
index 0000000000..4aca78618b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Decoder-vixl.h
@@ -0,0 +1,275 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DECODER_A64_H_
+#define VIXL_A64_DECODER_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "js/AllocPolicy.h"
+
+
+// List macro containing all visitors needed by the decoder class.
+
+#define VISITOR_LIST_THAT_RETURN(V) \
+ V(PCRelAddressing) \
+ V(AddSubImmediate) \
+ V(LogicalImmediate) \
+ V(MoveWideImmediate) \
+ V(Bitfield) \
+ V(Extract) \
+ V(UnconditionalBranch) \
+ V(UnconditionalBranchToRegister) \
+ V(CompareBranch) \
+ V(TestBranch) \
+ V(ConditionalBranch) \
+ V(System) \
+ V(Exception) \
+ V(LoadStorePairPostIndex) \
+ V(LoadStorePairOffset) \
+ V(LoadStorePairPreIndex) \
+ V(LoadStorePairNonTemporal) \
+ V(LoadLiteral) \
+ V(LoadStoreUnscaledOffset) \
+ V(LoadStorePostIndex) \
+ V(LoadStorePreIndex) \
+ V(LoadStoreRegisterOffset) \
+ V(LoadStoreUnsignedOffset) \
+ V(LoadStoreExclusive) \
+ V(LogicalShifted) \
+ V(AddSubShifted) \
+ V(AddSubExtended) \
+ V(AddSubWithCarry) \
+ V(ConditionalCompareRegister) \
+ V(ConditionalCompareImmediate) \
+ V(ConditionalSelect) \
+ V(DataProcessing1Source) \
+ V(DataProcessing2Source) \
+ V(DataProcessing3Source) \
+ V(FPCompare) \
+ V(FPConditionalCompare) \
+ V(FPConditionalSelect) \
+ V(FPImmediate) \
+ V(FPDataProcessing1Source) \
+ V(FPDataProcessing2Source) \
+ V(FPDataProcessing3Source) \
+ V(FPIntegerConvert) \
+ V(FPFixedPointConvert) \
+ V(Crypto2RegSHA) \
+ V(Crypto3RegSHA) \
+ V(CryptoAES) \
+ V(NEON2RegMisc) \
+ V(NEON3Different) \
+ V(NEON3Same) \
+ V(NEONAcrossLanes) \
+ V(NEONByIndexedElement) \
+ V(NEONCopy) \
+ V(NEONExtract) \
+ V(NEONLoadStoreMultiStruct) \
+ V(NEONLoadStoreMultiStructPostIndex) \
+ V(NEONLoadStoreSingleStruct) \
+ V(NEONLoadStoreSingleStructPostIndex) \
+ V(NEONModifiedImmediate) \
+ V(NEONScalar2RegMisc) \
+ V(NEONScalar3Diff) \
+ V(NEONScalar3Same) \
+ V(NEONScalarByIndexedElement) \
+ V(NEONScalarCopy) \
+ V(NEONScalarPairwise) \
+ V(NEONScalarShiftImmediate) \
+ V(NEONShiftImmediate) \
+ V(NEONTable) \
+ V(NEONPerm) \
+
+#define VISITOR_LIST_THAT_DONT_RETURN(V) \
+ V(Unallocated) \
+ V(Unimplemented) \
+
+#define VISITOR_LIST(V) \
+ VISITOR_LIST_THAT_RETURN(V) \
+ VISITOR_LIST_THAT_DONT_RETURN(V) \
+
+namespace vixl {
+
+// The Visitor interface. Disassembler and simulator (and other tools)
+// must provide implementations for all of these functions.
+class DecoderVisitor {
+ public:
+ enum VisitorConstness {
+ kConstVisitor,
+ kNonConstVisitor
+ };
+ explicit DecoderVisitor(VisitorConstness constness = kConstVisitor)
+ : constness_(constness) {}
+
+ virtual ~DecoderVisitor() {}
+
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) = 0;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ bool IsConstVisitor() const { return constness_ == kConstVisitor; }
+ Instruction* MutableInstruction(const Instruction* instr) {
+ VIXL_ASSERT(!IsConstVisitor());
+ return const_cast<Instruction*>(instr);
+ }
+
+ private:
+ const VisitorConstness constness_;
+};
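+// As a rough sketch (not something defined in this header), a visitor that
+// simply counts decoded instructions could override every callback with the
+// same list macro:
+//
+//   class CountingVisitor : public DecoderVisitor {
+//    public:
+//     #define DECLARE(A) \
+//       void Visit##A(const Instruction*) override { count_++; }
+//     VISITOR_LIST(DECLARE)
+//     #undef DECLARE
+//     uint64_t count_ = 0;
+//   };
+//
+// Such a visitor is registered with Decoder::AppendVisitor() below.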
+
+
+class Decoder {
+ public:
+ Decoder() {}
+
+ // Top-level wrappers around the actual decoding function.
+ void Decode(const Instruction* instr) {
+#ifdef DEBUG
+ for (auto visitor : visitors_) {
+ VIXL_ASSERT(visitor->IsConstVisitor());
+ }
+#endif
+ DecodeInstruction(instr);
+ }
+ void Decode(Instruction* instr) {
+ DecodeInstruction(const_cast<const Instruction*>(instr));
+ }
+
+ // Register a new visitor class with the decoder.
+ // Decode() will call the corresponding visitor method from all registered
+ // visitor classes when decoding reaches the leaf node of the instruction
+ // decode tree.
+ // Visitors are called in order.
+ // A visitor can be registered multiple times.
+ //
+ // d.AppendVisitor(V1);
+ // d.AppendVisitor(V2);
+ // d.PrependVisitor(V2);
+ // d.AppendVisitor(V3);
+ //
+ // d.Decode(i);
+ //
+ // will call in order visitor methods in V2, V1, V2, V3.
+ void AppendVisitor(DecoderVisitor* visitor);
+ void PrependVisitor(DecoderVisitor* visitor);
+  // These helpers register `new_visitor` before or after the first instance of
+  // `registered_visitor` in the list.
+ // So if
+ // V1, V2, V1, V2
+ // are registered in this order in the decoder, calls to
+ // d.InsertVisitorAfter(V3, V1);
+ // d.InsertVisitorBefore(V4, V2);
+ // will yield the order
+ // V1, V3, V4, V2, V1, V2
+ //
+  // For more complex modifications of the order of registered visitors,
+  // modify the underlying `visitors_` list directly.
+ void InsertVisitorBefore(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+ void InsertVisitorAfter(DecoderVisitor* new_visitor,
+ DecoderVisitor* registered_visitor);
+
+ // Remove all instances of a previously registered visitor class from the list
+ // of visitors stored by the decoder.
+ void RemoveVisitor(DecoderVisitor* visitor);
+
+ #define DECLARE(A) void Visit##A(const Instruction* instr);
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+
+ private:
+ // Decodes an instruction and calls the visitor functions registered with the
+ // Decoder class.
+ void DecodeInstruction(const Instruction* instr);
+
+ // Decode the PC relative addressing instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x0.
+ void DecodePCRelAddressing(const Instruction* instr);
+
+  // Decode the add/subtract immediate instruction, and call the corresponding
+ // visitors.
+ // On entry, instruction bits 27:24 = 0x1.
+ void DecodeAddSubImmediate(const Instruction* instr);
+
+ // Decode the branch, system command, and exception generation parts of
+ // the instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x4, 0x5, 0x6, 0x7}.
+ void DecodeBranchSystemException(const Instruction* instr);
+
+ // Decode the load and store parts of the instruction tree, and call
+ // the corresponding visitors.
+ // On entry, instruction bits 27:24 = {0x8, 0x9, 0xC, 0xD}.
+ void DecodeLoadStore(const Instruction* instr);
+
+ // Decode the logical immediate and move wide immediate parts of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x2.
+ void DecodeLogical(const Instruction* instr);
+
+ // Decode the bitfield and extraction parts of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 27:24 = 0x3.
+ void DecodeBitfieldExtract(const Instruction* instr);
+
+ // Decode the data processing parts of the instruction tree, and call the
+ // corresponding visitors.
+  // On entry, instruction bits 27:24 = {0xA, 0xB}.
+ void DecodeDataProcessing(const Instruction* instr);
+
+ // Decode the floating point parts of the instruction tree, and call the
+ // corresponding visitors.
+ // On entry, instruction bits 27:24 = {0xE, 0xF}.
+ void DecodeFP(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) load/store part of the instruction tree,
+ // and call the corresponding visitors.
+ // On entry, instruction bits 29:25 = 0x6.
+ void DecodeNEONLoadStore(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) vector data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0x7.
+ void DecodeNEONVectorDataProcessing(const Instruction* instr);
+
+ // Decode the Advanced SIMD (NEON) scalar data processing part of the
+ // instruction tree, and call the corresponding visitors.
+ // On entry, instruction bits 28:25 = 0xF.
+ void DecodeNEONScalarDataProcessing(const Instruction* instr);
+
+ private:
+ // Visitors are registered in a list.
+ mozilla::Vector<DecoderVisitor*, 8, js::SystemAllocPolicy> visitors_;
+};
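+// Typical wiring (an illustrative sketch, assuming PrintDisassembler from
+// Disasm-vixl.h takes the output FILE* as in upstream VIXL):
+//
+//   Decoder decoder;
+//   PrintDisassembler disasm(stdout);
+//   decoder.AppendVisitor(&disasm);
+//   decoder.Decode(instr);  // instr is a const Instruction*.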
+
+} // namespace vixl
+
+#endif // VIXL_A64_DECODER_A64_H_
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.cpp b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
new file mode 100644
index 0000000000..97448654b1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.cpp
@@ -0,0 +1,3519 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Disasm-vixl.h"
+
+#include "mozilla/Sprintf.h"
+#include <cstdlib>
+
+namespace vixl {
+
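+// The default constructor allocates a 256-byte text buffer that the
+// Disassembler owns and releases in its destructor.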
+Disassembler::Disassembler() {
+ buffer_size_ = 256;
+ buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
+ buffer_pos_ = 0;
+ own_buffer_ = true;
+ code_address_offset_ = 0;
+}
+
+
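+// This overload writes into a caller-provided buffer; ownership stays with
+// the caller, so the destructor does not free it.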
+Disassembler::Disassembler(char* text_buffer, int buffer_size) {
+ buffer_size_ = buffer_size;
+ buffer_ = text_buffer;
+ buffer_pos_ = 0;
+ own_buffer_ = false;
+ code_address_offset_ = 0;
+}
+
+
+Disassembler::~Disassembler() {
+ if (own_buffer_) {
+ free(buffer_);
+ }
+}
+
+
+char* Disassembler::GetOutput() {
+ return buffer_;
+}
+
+
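+// In the visitor methods below, the `form` strings are templates for
+// Format(): tokens introduced by a single quote (such as 'Rd, 'Rn or
+// 'IAddSub) are substituted with the corresponding operand text, while
+// everything else is copied verbatim.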
+void Disassembler::VisitAddSubImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+  bool stack_op = (rd_is_zr || RnIsZROrSP(instr)) &&
+                  (instr->ImmAddSub() == 0);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rns, 'IAddSub";
+ const char *form_cmp = "'Rns, 'IAddSub";
+ const char *form_mov = "'Rds, 'Rns";
+
+ switch (instr->Mask(AddSubImmediateMask)) {
+ case ADD_w_imm:
+ case ADD_x_imm: {
+ mnemonic = "add";
+ if (stack_op) {
+ mnemonic = "mov";
+ form = form_mov;
+ }
+ break;
+ }
+ case ADDS_w_imm:
+ case ADDS_x_imm: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_imm:
+ case SUB_x_imm: mnemonic = "sub"; break;
+ case SUBS_w_imm:
+ case SUBS_x_imm: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NDP";
+ const char *form_cmp = "'Rn, 'Rm'NDP";
+ const char *form_neg = "'Rd, 'Rm'NDP";
+
+ switch (instr->Mask(AddSubShiftedMask)) {
+ case ADD_w_shift:
+ case ADD_x_shift: mnemonic = "add"; break;
+ case ADDS_w_shift:
+ case ADDS_x_shift: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_shift:
+ case SUB_x_shift: {
+ mnemonic = "sub";
+ if (rn_is_zr) {
+ mnemonic = "neg";
+ form = form_neg;
+ }
+ break;
+ }
+ case SUBS_w_shift:
+ case SUBS_x_shift: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ } else if (rn_is_zr) {
+ mnemonic = "negs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubExtended(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ const char *mnemonic = "";
+ Extend mode = static_cast<Extend>(instr->ExtendMode());
+ const char *form = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rds, 'Rns, 'Xm'Ext" : "'Rds, 'Rns, 'Wm'Ext";
+ const char *form_cmp = ((mode == UXTX) || (mode == SXTX)) ?
+ "'Rns, 'Xm'Ext" : "'Rns, 'Wm'Ext";
+
+ switch (instr->Mask(AddSubExtendedMask)) {
+ case ADD_w_ext:
+ case ADD_x_ext: mnemonic = "add"; break;
+ case ADDS_w_ext:
+ case ADDS_x_ext: {
+ mnemonic = "adds";
+ if (rd_is_zr) {
+ mnemonic = "cmn";
+ form = form_cmp;
+ }
+ break;
+ }
+ case SUB_w_ext:
+ case SUB_x_ext: mnemonic = "sub"; break;
+ case SUBS_w_ext:
+ case SUBS_x_ext: {
+ mnemonic = "subs";
+ if (rd_is_zr) {
+ mnemonic = "cmp";
+ form = form_cmp;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitAddSubWithCarry(const Instruction* instr) {
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_neg = "'Rd, 'Rm";
+
+ switch (instr->Mask(AddSubWithCarryMask)) {
+ case ADC_w:
+ case ADC_x: mnemonic = "adc"; break;
+ case ADCS_w:
+ case ADCS_x: mnemonic = "adcs"; break;
+ case SBC_w:
+ case SBC_x: {
+ mnemonic = "sbc";
+ if (rn_is_zr) {
+ mnemonic = "ngc";
+ form = form_neg;
+ }
+ break;
+ }
+ case SBCS_w:
+ case SBCS_x: {
+ mnemonic = "sbcs";
+ if (rn_is_zr) {
+ mnemonic = "ngcs";
+ form = form_neg;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLogicalImmediate(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rds, 'Rn, 'ITri";
+
+ if (instr->ImmLogical() == 0) {
+ // The immediate encoded in the instruction is not in the expected format.
+ Format(instr, "unallocated", "(LogicalImmediate)");
+ return;
+ }
+
+ switch (instr->Mask(LogicalImmediateMask)) {
+ case AND_w_imm:
+ case AND_x_imm: mnemonic = "and"; break;
+ case ORR_w_imm:
+ case ORR_x_imm: {
+ mnemonic = "orr";
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ if (rn_is_zr && !IsMovzMovnImm(reg_size, instr->ImmLogical())) {
+ mnemonic = "mov";
+ form = "'Rds, 'ITri";
+ }
+ break;
+ }
+ case EOR_w_imm:
+ case EOR_x_imm: mnemonic = "eor"; break;
+ case ANDS_w_imm:
+ case ANDS_x_imm: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'ITri";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
+ VIXL_ASSERT((reg_size == kXRegSize) ||
+ ((reg_size == kWRegSize) && (value <= 0xffffffff)));
+
+ // Test for movz: 16 bits set at positions 0, 16, 32 or 48.
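+  // For example, 0x0000ffff00000000 has all of its set bits in positions
+  // 47:32, so a single movz (with a left shift of 32) can materialize it.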
+ if (((value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((value & UINT64_C(0x0000ffffffffffff)) == 0)) {
+ return true;
+ }
+
+ // Test for movn: NOT(16 bits set at positions 0, 16, 32 or 48).
+ if ((reg_size == kXRegSize) &&
+ (((~value & UINT64_C(0xffffffffffff0000)) == 0) ||
+ ((~value & UINT64_C(0xffffffff0000ffff)) == 0) ||
+ ((~value & UINT64_C(0xffff0000ffffffff)) == 0) ||
+ ((~value & UINT64_C(0x0000ffffffffffff)) == 0))) {
+ return true;
+ }
+ if ((reg_size == kWRegSize) &&
+ (((value & 0xffff0000) == 0xffff0000) ||
+ ((value & 0x0000ffff) == 0x0000ffff))) {
+ return true;
+ }
+ return false;
+}
+
+
+void Disassembler::VisitLogicalShifted(const Instruction* instr) {
+ bool rd_is_zr = RdIsZROrSP(instr);
+ bool rn_is_zr = RnIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm'NLo";
+
+ switch (instr->Mask(LogicalShiftedMask)) {
+ case AND_w:
+ case AND_x: mnemonic = "and"; break;
+ case BIC_w:
+ case BIC_x: mnemonic = "bic"; break;
+ case EOR_w:
+ case EOR_x: mnemonic = "eor"; break;
+ case EON_w:
+ case EON_x: mnemonic = "eon"; break;
+ case BICS_w:
+ case BICS_x: mnemonic = "bics"; break;
+ case ANDS_w:
+ case ANDS_x: {
+ mnemonic = "ands";
+ if (rd_is_zr) {
+ mnemonic = "tst";
+ form = "'Rn, 'Rm'NLo";
+ }
+ break;
+ }
+ case ORR_w:
+ case ORR_x: {
+ mnemonic = "orr";
+ if (rn_is_zr && (instr->ImmDPShift() == 0) && (instr->ShiftDP() == LSL)) {
+ mnemonic = "mov";
+ form = "'Rd, 'Rm";
+ }
+ break;
+ }
+ case ORN_w:
+ case ORN_x: {
+ mnemonic = "orn";
+ if (rn_is_zr) {
+ mnemonic = "mvn";
+ form = "'Rd, 'Rm'NLo";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareRegister(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'Rm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareRegisterMask)) {
+ case CCMN_w:
+ case CCMN_x: mnemonic = "ccmn"; break;
+ case CCMP_w:
+ case CCMP_x: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalCompareImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rn, 'IP, 'INzcv, 'Cond";
+
+ switch (instr->Mask(ConditionalCompareImmediateMask)) {
+ case CCMN_w_imm:
+ case CCMN_x_imm: mnemonic = "ccmn"; break;
+ case CCMP_w_imm:
+ case CCMP_x_imm: mnemonic = "ccmp"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitConditionalSelect(const Instruction* instr) {
+ bool rnm_is_zr = (RnIsZROrSP(instr) && RmIsZROrSP(instr));
+ bool rn_is_rm = (instr->Rn() == instr->Rm());
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'Cond";
+ const char *form_test = "'Rd, 'CInv";
+ const char *form_update = "'Rd, 'Rn, 'CInv";
+
+ Condition cond = static_cast<Condition>(instr->Condition());
+ bool invertible_cond = (cond != al) && (cond != nv);
+
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: mnemonic = "csel"; break;
+ case CSINC_w:
+ case CSINC_x: {
+ mnemonic = "csinc";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "cset";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinc";
+ form = form_update;
+ }
+ break;
+ }
+ case CSINV_w:
+ case CSINV_x: {
+ mnemonic = "csinv";
+ if (rnm_is_zr && invertible_cond) {
+ mnemonic = "csetm";
+ form = form_test;
+ } else if (rn_is_rm && invertible_cond) {
+ mnemonic = "cinv";
+ form = form_update;
+ }
+ break;
+ }
+ case CSNEG_w:
+ case CSNEG_x: {
+ mnemonic = "csneg";
+ if (rn_is_rm && invertible_cond) {
+ mnemonic = "cneg";
+ form = form_update;
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitBitfield(const Instruction* instr) {
+ unsigned s = instr->ImmS();
+ unsigned r = instr->ImmR();
+ unsigned rd_size_minus_1 =
+ ((instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize) - 1;
+ const char *mnemonic = "";
+ const char *form = "";
+ const char *form_shift_right = "'Rd, 'Rn, 'IBr";
+ const char *form_extend = "'Rd, 'Wn";
+ const char *form_bfiz = "'Rd, 'Rn, 'IBZ-r, 'IBs+1";
+ const char *form_bfx = "'Rd, 'Rn, 'IBr, 'IBs-r+1";
+ const char *form_lsl = "'Rd, 'Rn, 'IBZ-r";
+
+ switch (instr->Mask(BitfieldMask)) {
+ case SBFM_w:
+ case SBFM_x: {
+ mnemonic = "sbfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "sxtb";
+ } else if (s == 15) {
+ mnemonic = "sxth";
+ } else if ((s == 31) && (instr->SixtyFourBits() == 1)) {
+ mnemonic = "sxtw";
+ } else {
+ form = form_bfx;
+ }
+ } else if (s == rd_size_minus_1) {
+ mnemonic = "asr";
+ form = form_shift_right;
+ } else if (s < r) {
+ mnemonic = "sbfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case UBFM_w:
+ case UBFM_x: {
+ mnemonic = "ubfx";
+ form = form_bfx;
+ if (r == 0) {
+ form = form_extend;
+ if (s == 7) {
+ mnemonic = "uxtb";
+ } else if (s == 15) {
+ mnemonic = "uxth";
+ } else {
+ form = form_bfx;
+ }
+ }
+ if (s == rd_size_minus_1) {
+ mnemonic = "lsr";
+ form = form_shift_right;
+ } else if (r == s + 1) {
+ mnemonic = "lsl";
+ form = form_lsl;
+ } else if (s < r) {
+ mnemonic = "ubfiz";
+ form = form_bfiz;
+ }
+ break;
+ }
+ case BFM_w:
+ case BFM_x: {
+ mnemonic = "bfxil";
+ form = form_bfx;
+ if (s < r) {
+ mnemonic = "bfi";
+ form = form_bfiz;
+ }
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitExtract(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn, 'Rm, 'IExtract";
+
+ switch (instr->Mask(ExtractMask)) {
+ case EXTR_w:
+ case EXTR_x: {
+ if (instr->Rn() == instr->Rm()) {
+ mnemonic = "ror";
+ form = "'Rd, 'Rn, 'IExtract";
+ } else {
+ mnemonic = "extr";
+ }
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitPCRelAddressing(const Instruction* instr) {
+ switch (instr->Mask(PCRelAddressingMask)) {
+ case ADR: Format(instr, "adr", "'Xd, 'AddrPCRelByte"); break;
+ case ADRP: Format(instr, "adrp", "'Xd, 'AddrPCRelPage"); break;
+ default: Format(instr, "unimplemented", "(PCRelAddressing)");
+ }
+}
+
+
+void Disassembler::VisitConditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(ConditionalBranchMask)) {
+ case B_cond: Format(instr, "b.'CBrn", "'TImmCond"); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Disassembler::VisitUnconditionalBranchToRegister(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Xn";
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BR: mnemonic = "br"; break;
+ case BLR: mnemonic = "blr"; break;
+ case RET: {
+ mnemonic = "ret";
+ if (instr->Rn() == kLinkRegCode) {
+ form = NULL;
+ }
+ break;
+ }
+ default: form = "(UnconditionalBranchToRegister)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitUnconditionalBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'TImmUncn";
+
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case B: mnemonic = "b"; break;
+ case BL: mnemonic = "bl"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Rn";
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(RBIT, "rbit");
+ FORMAT(REV16, "rev16");
+ FORMAT(REV, "rev");
+ FORMAT(CLZ, "clz");
+ FORMAT(CLS, "cls");
+ #undef FORMAT
+ case REV32_x: mnemonic = "rev32"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Rd, 'Rn, 'Rm";
+ const char *form_wwx = "'Wd, 'Wn, 'Xm";
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_w: \
+ case A##_x: mnemonic = B; break;
+ FORMAT(UDIV, "udiv");
+ FORMAT(SDIV, "sdiv");
+ FORMAT(LSLV, "lsl");
+ FORMAT(LSRV, "lsr");
+ FORMAT(ASRV, "asr");
+ FORMAT(RORV, "ror");
+ #undef FORMAT
+ case CRC32B: mnemonic = "crc32b"; break;
+ case CRC32H: mnemonic = "crc32h"; break;
+ case CRC32W: mnemonic = "crc32w"; break;
+ case CRC32X: mnemonic = "crc32x"; form = form_wwx; break;
+ case CRC32CB: mnemonic = "crc32cb"; break;
+ case CRC32CH: mnemonic = "crc32ch"; break;
+ case CRC32CW: mnemonic = "crc32cw"; break;
+ case CRC32CX: mnemonic = "crc32cx"; form = form_wwx; break;
+ default: form = "(DataProcessing2Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitDataProcessing3Source(const Instruction* instr) {
+ bool ra_is_zr = RaIsZROrSP(instr);
+ const char *mnemonic = "";
+ const char *form = "'Xd, 'Wn, 'Wm, 'Xa";
+ const char *form_rrr = "'Rd, 'Rn, 'Rm";
+ const char *form_rrrr = "'Rd, 'Rn, 'Rm, 'Ra";
+ const char *form_xww = "'Xd, 'Wn, 'Wm";
+ const char *form_xxx = "'Xd, 'Xn, 'Xm";
+
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x: {
+ mnemonic = "madd";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mul";
+ form = form_rrr;
+ }
+ break;
+ }
+ case MSUB_w:
+ case MSUB_x: {
+ mnemonic = "msub";
+ form = form_rrrr;
+ if (ra_is_zr) {
+ mnemonic = "mneg";
+ form = form_rrr;
+ }
+ break;
+ }
+ case SMADDL_x: {
+ mnemonic = "smaddl";
+ if (ra_is_zr) {
+ mnemonic = "smull";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMSUBL_x: {
+ mnemonic = "smsubl";
+ if (ra_is_zr) {
+ mnemonic = "smnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMADDL_x: {
+ mnemonic = "umaddl";
+ if (ra_is_zr) {
+ mnemonic = "umull";
+ form = form_xww;
+ }
+ break;
+ }
+ case UMSUBL_x: {
+ mnemonic = "umsubl";
+ if (ra_is_zr) {
+ mnemonic = "umnegl";
+ form = form_xww;
+ }
+ break;
+ }
+ case SMULH_x: {
+ mnemonic = "smulh";
+ form = form_xxx;
+ break;
+ }
+ case UMULH_x: {
+ mnemonic = "umulh";
+ form = form_xxx;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCompareBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rt, 'TImmCmpa";
+
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w:
+ case CBZ_x: mnemonic = "cbz"; break;
+ case CBNZ_w:
+ case CBNZ_x: mnemonic = "cbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitTestBranch(const Instruction* instr) {
+ const char *mnemonic = "";
+ // If the top bit of the immediate is clear, the tested register is
+ // disassembled as Wt, otherwise Xt. As the top bit of the immediate is
+ // encoded in bit 31 of the instruction, we can reuse the Rt form, which
+ // uses bit 31 (normally "sf") to choose the register size.
+ const char *form = "'Rt, 'IS, 'TImmTest";
+
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: mnemonic = "tbz"; break;
+ case TBNZ: mnemonic = "tbnz"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'IMoveImm";
+
+ // Print the shift separately for movk, to make it clear which half word will
+ // be overwritten. Movn and movz print the computed immediate, which includes
+ // shift calculation.
+ switch (instr->Mask(MoveWideImmediateMask)) {
+ case MOVN_w:
+ case MOVN_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+ if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+ mnemonic = "movn";
+ } else {
+ mnemonic = "mov";
+ form = "'Rd, 'IMoveNeg";
+ }
+ } else {
+ mnemonic = "movn";
+ }
+ break;
+ case MOVZ_w:
+ case MOVZ_x:
+ if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
+ mnemonic = "mov";
+ else
+ mnemonic = "movz";
+ break;
+ case MOVK_w:
+ case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
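+// (opcode, mnemonic, register operand) triples shared by the single-register
+// load/store visitors below; each visitor appends its own addressing-mode
+// suffix to the register operand string.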
+#define LOAD_STORE_LIST(V) \
+ V(STRB_w, "strb", "'Wt") \
+ V(STRH_w, "strh", "'Wt") \
+ V(STR_w, "str", "'Wt") \
+ V(STR_x, "str", "'Xt") \
+ V(LDRB_w, "ldrb", "'Wt") \
+ V(LDRH_w, "ldrh", "'Wt") \
+ V(LDR_w, "ldr", "'Wt") \
+ V(LDR_x, "ldr", "'Xt") \
+ V(LDRSB_x, "ldrsb", "'Xt") \
+ V(LDRSH_x, "ldrsh", "'Xt") \
+ V(LDRSW_x, "ldrsw", "'Xt") \
+ V(LDRSB_w, "ldrsb", "'Wt") \
+ V(LDRSH_w, "ldrsh", "'Wt") \
+ V(STR_b, "str", "'Bt") \
+ V(STR_h, "str", "'Ht") \
+ V(STR_s, "str", "'St") \
+ V(STR_d, "str", "'Dt") \
+ V(LDR_b, "ldr", "'Bt") \
+ V(LDR_h, "ldr", "'Ht") \
+ V(LDR_s, "ldr", "'St") \
+ V(LDR_d, "ldr", "'Dt") \
+ V(STR_q, "str", "'Qt") \
+ V(LDR_q, "ldr", "'Qt")
+
+void Disassembler::VisitLoadStorePreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePreIndex)";
+
+ switch (instr->Mask(LoadStorePreIndexMask)) {
+ #define LS_PREINDEX(A, B, C) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILS]!"; break;
+ LOAD_STORE_LIST(LS_PREINDEX)
+ #undef LS_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePostIndex)";
+
+ switch (instr->Mask(LoadStorePostIndexMask)) {
+ #define LS_POSTINDEX(A, B, C) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILS"; break;
+ LOAD_STORE_LIST(LS_POSTINDEX)
+ #undef LS_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreUnsignedOffset)";
+
+ switch (instr->Mask(LoadStoreUnsignedOffsetMask)) {
+ #define LS_UNSIGNEDOFFSET(A, B, C) \
+ case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
+ LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
+ #undef LS_UNSIGNEDOFFSET
+ case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStoreRegisterOffset)";
+
+ switch (instr->Mask(LoadStoreRegisterOffsetMask)) {
+ #define LS_REGISTEROFFSET(A, B, C) \
+ case A##_reg: mnemonic = B; form = C ", ['Xns, 'Offsetreg]"; break;
+ LOAD_STORE_LIST(LS_REGISTEROFFSET)
+ #undef LS_REGISTEROFFSET
+ case PRFM_reg: mnemonic = "prfm"; form = "'PrefOp, ['Xns, 'Offsetreg]";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Wt, ['Xns'ILS]";
+ const char *form_x = "'Xt, ['Xns'ILS]";
+ const char *form_b = "'Bt, ['Xns'ILS]";
+ const char *form_h = "'Ht, ['Xns'ILS]";
+ const char *form_s = "'St, ['Xns'ILS]";
+ const char *form_d = "'Dt, ['Xns'ILS]";
+ const char *form_q = "'Qt, ['Xns'ILS]";
+ const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
+
+ switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
+ case STURB_w: mnemonic = "sturb"; break;
+ case STURH_w: mnemonic = "sturh"; break;
+ case STUR_w: mnemonic = "stur"; break;
+ case STUR_x: mnemonic = "stur"; form = form_x; break;
+ case STUR_b: mnemonic = "stur"; form = form_b; break;
+ case STUR_h: mnemonic = "stur"; form = form_h; break;
+ case STUR_s: mnemonic = "stur"; form = form_s; break;
+ case STUR_d: mnemonic = "stur"; form = form_d; break;
+ case STUR_q: mnemonic = "stur"; form = form_q; break;
+ case LDURB_w: mnemonic = "ldurb"; break;
+ case LDURH_w: mnemonic = "ldurh"; break;
+ case LDUR_w: mnemonic = "ldur"; break;
+ case LDUR_x: mnemonic = "ldur"; form = form_x; break;
+ case LDUR_b: mnemonic = "ldur"; form = form_b; break;
+ case LDUR_h: mnemonic = "ldur"; form = form_h; break;
+ case LDUR_s: mnemonic = "ldur"; form = form_s; break;
+ case LDUR_d: mnemonic = "ldur"; form = form_d; break;
+ case LDUR_q: mnemonic = "ldur"; form = form_q; break;
+ case LDURSB_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSB_w: mnemonic = "ldursb"; break;
+ case LDURSH_x: form = form_x; VIXL_FALLTHROUGH();
+ case LDURSH_w: mnemonic = "ldursh"; break;
+ case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+ case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
+ default: form = "(LoadStoreUnscaledOffset)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadLiteral(const Instruction* instr) {
+ const char *mnemonic = "ldr";
+ const char *form = "(LoadLiteral)";
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit: form = "'Wt, 'ILLiteral 'LValue"; break;
+ case LDR_x_lit: form = "'Xt, 'ILLiteral 'LValue"; break;
+ case LDR_s_lit: form = "'St, 'ILLiteral 'LValue"; break;
+ case LDR_d_lit: form = "'Dt, 'ILLiteral 'LValue"; break;
+ case LDR_q_lit: form = "'Qt, 'ILLiteral 'LValue"; break;
+ case LDRSW_x_lit: {
+ mnemonic = "ldrsw";
+ form = "'Xt, 'ILLiteral 'LValue";
+ break;
+ }
+ case PRFM_lit: {
+ mnemonic = "prfm";
+ form = "'PrefOp, 'ILLiteral 'LValue";
+ break;
+ }
+ default: mnemonic = "unimplemented";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+#define LOAD_STORE_PAIR_LIST(V) \
+ V(STP_w, "stp", "'Wt, 'Wt2", "2") \
+ V(LDP_w, "ldp", "'Wt, 'Wt2", "2") \
+ V(LDPSW_x, "ldpsw", "'Xt, 'Xt2", "2") \
+ V(STP_x, "stp", "'Xt, 'Xt2", "3") \
+ V(LDP_x, "ldp", "'Xt, 'Xt2", "3") \
+ V(STP_s, "stp", "'St, 'St2", "2") \
+ V(LDP_s, "ldp", "'St, 'St2", "2") \
+ V(STP_d, "stp", "'Dt, 'Dt2", "3") \
+ V(LDP_d, "ldp", "'Dt, 'Dt2", "3") \
+ V(LDP_q, "ldp", "'Qt, 'Qt2", "4") \
+ V(STP_q, "stp", "'Qt, 'Qt2", "4")
+
+void Disassembler::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPostIndex)";
+
+ switch (instr->Mask(LoadStorePairPostIndexMask)) {
+ #define LSP_POSTINDEX(A, B, C, D) \
+ case A##_post: mnemonic = B; form = C ", ['Xns]'ILP" D; break;
+ LOAD_STORE_PAIR_LIST(LSP_POSTINDEX)
+ #undef LSP_POSTINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairPreIndex)";
+
+ switch (instr->Mask(LoadStorePairPreIndexMask)) {
+ #define LSP_PREINDEX(A, B, C, D) \
+ case A##_pre: mnemonic = B; form = C ", ['Xns'ILP" D "]!"; break;
+ LOAD_STORE_PAIR_LIST(LSP_PREINDEX)
+ #undef LSP_PREINDEX
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairOffset(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(LoadStorePairOffset)";
+
+ switch (instr->Mask(LoadStorePairOffsetMask)) {
+ #define LSP_OFFSET(A, B, C, D) \
+ case A##_off: mnemonic = B; form = C ", ['Xns'ILP" D "]"; break;
+ LOAD_STORE_PAIR_LIST(LSP_OFFSET)
+ #undef LSP_OFFSET
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStorePairNonTemporalMask)) {
+ case STNP_w: mnemonic = "stnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case LDNP_w: mnemonic = "ldnp"; form = "'Wt, 'Wt2, ['Xns'ILP2]"; break;
+ case STNP_x: mnemonic = "stnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case LDNP_x: mnemonic = "ldnp"; form = "'Xt, 'Xt2, ['Xns'ILP3]"; break;
+ case STNP_s: mnemonic = "stnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case LDNP_s: mnemonic = "ldnp"; form = "'St, 'St2, ['Xns'ILP2]"; break;
+ case STNP_d: mnemonic = "stnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case LDNP_d: mnemonic = "ldnp"; form = "'Dt, 'Dt2, ['Xns'ILP3]"; break;
+ case STNP_q: mnemonic = "stnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ case LDNP_q: mnemonic = "ldnp"; form = "'Qt, 'Qt2, ['Xns'ILP4]"; break;
+ default: form = "(LoadStorePairNonTemporal)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitLoadStoreExclusive(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form;
+
+ switch (instr->Mask(LoadStoreExclusiveMask)) {
+ case STXRB_w: mnemonic = "stxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXRH_w: mnemonic = "stxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXR_w: mnemonic = "stxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STXR_x: mnemonic = "stxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+ case LDXRB_w: mnemonic = "ldxrb"; form = "'Wt, ['Xns]"; break;
+ case LDXRH_w: mnemonic = "ldxrh"; form = "'Wt, ['Xns]"; break;
+ case LDXR_w: mnemonic = "ldxr"; form = "'Wt, ['Xns]"; break;
+ case LDXR_x: mnemonic = "ldxr"; form = "'Xt, ['Xns]"; break;
+ case STXP_w: mnemonic = "stxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+ case STXP_x: mnemonic = "stxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+ case LDXP_w: mnemonic = "ldxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+ case LDXP_x: mnemonic = "ldxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+ case STLXRB_w: mnemonic = "stlxrb"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXRH_w: mnemonic = "stlxrh"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXR_w: mnemonic = "stlxr"; form = "'Ws, 'Wt, ['Xns]"; break;
+ case STLXR_x: mnemonic = "stlxr"; form = "'Ws, 'Xt, ['Xns]"; break;
+ case LDAXRB_w: mnemonic = "ldaxrb"; form = "'Wt, ['Xns]"; break;
+ case LDAXRH_w: mnemonic = "ldaxrh"; form = "'Wt, ['Xns]"; break;
+ case LDAXR_w: mnemonic = "ldaxr"; form = "'Wt, ['Xns]"; break;
+ case LDAXR_x: mnemonic = "ldaxr"; form = "'Xt, ['Xns]"; break;
+ case STLXP_w: mnemonic = "stlxp"; form = "'Ws, 'Wt, 'Wt2, ['Xns]"; break;
+ case STLXP_x: mnemonic = "stlxp"; form = "'Ws, 'Xt, 'Xt2, ['Xns]"; break;
+ case LDAXP_w: mnemonic = "ldaxp"; form = "'Wt, 'Wt2, ['Xns]"; break;
+ case LDAXP_x: mnemonic = "ldaxp"; form = "'Xt, 'Xt2, ['Xns]"; break;
+ case STLRB_w: mnemonic = "stlrb"; form = "'Wt, ['Xns]"; break;
+ case STLRH_w: mnemonic = "stlrh"; form = "'Wt, ['Xns]"; break;
+ case STLR_w: mnemonic = "stlr"; form = "'Wt, ['Xns]"; break;
+ case STLR_x: mnemonic = "stlr"; form = "'Xt, ['Xns]"; break;
+ case LDARB_w: mnemonic = "ldarb"; form = "'Wt, ['Xns]"; break;
+ case LDARH_w: mnemonic = "ldarh"; form = "'Wt, ['Xns]"; break;
+ case LDAR_w: mnemonic = "ldar"; form = "'Wt, ['Xns]"; break;
+ case LDAR_x: mnemonic = "ldar"; form = "'Xt, ['Xns]"; break;
+ default: form = "(LoadStoreExclusive)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPCompare(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm";
+ const char *form_zero = "'Fn, #0.0";
+
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMP_s_zero:
+ case FCMP_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMP_s:
+ case FCMP_d: mnemonic = "fcmp"; break;
+ case FCMPE_s_zero:
+ case FCMPE_d_zero: form = form_zero; VIXL_FALLTHROUGH();
+ case FCMPE_s:
+ case FCMPE_d: mnemonic = "fcmpe"; break;
+ default: form = "(FPCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalCompare(const Instruction* instr) {
+  const char *mnemonic = "unimplemented";
+ const char *form = "'Fn, 'Fm, 'INzcv, 'Cond";
+
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMP_s:
+ case FCCMP_d: mnemonic = "fccmp"; break;
+ case FCCMPE_s:
+ case FCCMPE_d: mnemonic = "fccmpe"; break;
+ default: form = "(FPConditionalCompare)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPConditionalSelect(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Cond";
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s:
+ case FCSEL_d: mnemonic = "fcsel"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing1Source(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Fd, 'Fn";
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMOV, "fmov");
+ FORMAT(FABS, "fabs");
+ FORMAT(FNEG, "fneg");
+ FORMAT(FSQRT, "fsqrt");
+ FORMAT(FRINTN, "frintn");
+ FORMAT(FRINTP, "frintp");
+ FORMAT(FRINTM, "frintm");
+ FORMAT(FRINTZ, "frintz");
+ FORMAT(FRINTA, "frinta");
+ FORMAT(FRINTX, "frintx");
+ FORMAT(FRINTI, "frinti");
+ #undef FORMAT
+ case FCVT_ds: mnemonic = "fcvt"; form = "'Dd, 'Sn"; break;
+ case FCVT_sd: mnemonic = "fcvt"; form = "'Sd, 'Dn"; break;
+ case FCVT_hs: mnemonic = "fcvt"; form = "'Hd, 'Sn"; break;
+ case FCVT_sh: mnemonic = "fcvt"; form = "'Sd, 'Hn"; break;
+ case FCVT_dh: mnemonic = "fcvt"; form = "'Dd, 'Hn"; break;
+ case FCVT_hd: mnemonic = "fcvt"; form = "'Hd, 'Dn"; break;
+ default: form = "(FPDataProcessing1Source)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing2Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm";
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMUL, "fmul");
+ FORMAT(FDIV, "fdiv");
+ FORMAT(FADD, "fadd");
+ FORMAT(FSUB, "fsub");
+ FORMAT(FMAX, "fmax");
+ FORMAT(FMIN, "fmin");
+ FORMAT(FMAXNM, "fmaxnm");
+ FORMAT(FMINNM, "fminnm");
+ FORMAT(FNMUL, "fnmul");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPDataProcessing3Source(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Fd, 'Fn, 'Fm, 'Fa";
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ #define FORMAT(A, B) \
+ case A##_s: \
+ case A##_d: mnemonic = B; break;
+ FORMAT(FMADD, "fmadd");
+ FORMAT(FMSUB, "fmsub");
+ FORMAT(FNMADD, "fnmadd");
+ FORMAT(FNMSUB, "fnmsub");
+ #undef FORMAT
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPImmediate(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "(FPImmediate)";
+
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: mnemonic = "fmov"; form = "'Sd, 'IFPSingle"; break;
+ case FMOV_d_imm: mnemonic = "fmov"; form = "'Dd, 'IFPDouble"; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPIntegerConvert(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(FPIntegerConvert)";
+ const char *form_rf = "'Rd, 'Fn";
+ const char *form_fr = "'Fd, 'Rn";
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FMOV_ws:
+ case FMOV_xd: mnemonic = "fmov"; form = form_rf; break;
+ case FMOV_sw:
+ case FMOV_dx: mnemonic = "fmov"; form = form_fr; break;
+ case FMOV_d1_x: mnemonic = "fmov"; form = "'Vd.D[1], 'Rn"; break;
+ case FMOV_x_d1: mnemonic = "fmov"; form = "'Rd, 'Vn.D[1]"; break;
+ case FCVTAS_ws:
+ case FCVTAS_xs:
+ case FCVTAS_wd:
+ case FCVTAS_xd: mnemonic = "fcvtas"; form = form_rf; break;
+ case FCVTAU_ws:
+ case FCVTAU_xs:
+ case FCVTAU_wd:
+ case FCVTAU_xd: mnemonic = "fcvtau"; form = form_rf; break;
+ case FCVTMS_ws:
+ case FCVTMS_xs:
+ case FCVTMS_wd:
+ case FCVTMS_xd: mnemonic = "fcvtms"; form = form_rf; break;
+ case FCVTMU_ws:
+ case FCVTMU_xs:
+ case FCVTMU_wd:
+ case FCVTMU_xd: mnemonic = "fcvtmu"; form = form_rf; break;
+ case FCVTNS_ws:
+ case FCVTNS_xs:
+ case FCVTNS_wd:
+ case FCVTNS_xd: mnemonic = "fcvtns"; form = form_rf; break;
+ case FCVTNU_ws:
+ case FCVTNU_xs:
+ case FCVTNU_wd:
+ case FCVTNU_xd: mnemonic = "fcvtnu"; form = form_rf; break;
+ case FCVTZU_xd:
+ case FCVTZU_ws:
+ case FCVTZU_wd:
+ case FCVTZU_xs: mnemonic = "fcvtzu"; form = form_rf; break;
+ case FCVTZS_xd:
+ case FCVTZS_wd:
+ case FCVTZS_xs:
+ case FCVTZS_ws: mnemonic = "fcvtzs"; form = form_rf; break;
+ case FCVTPU_xd:
+ case FCVTPU_ws:
+ case FCVTPU_wd:
+ case FCVTPU_xs: mnemonic = "fcvtpu"; form = form_rf; break;
+ case FCVTPS_xd:
+ case FCVTPS_wd:
+ case FCVTPS_xs:
+ case FCVTPS_ws: mnemonic = "fcvtps"; form = form_rf; break;
+ case SCVTF_sw:
+ case SCVTF_sx:
+ case SCVTF_dw:
+ case SCVTF_dx: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw:
+ case UCVTF_sx:
+ case UCVTF_dw:
+ case UCVTF_dx: mnemonic = "ucvtf"; form = form_fr; break;
+ case FJCVTZS: mnemonic = "fjcvtzs"; form = form_rf; break;
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitFPFixedPointConvert(const Instruction* instr) {
+ const char *mnemonic = "";
+ const char *form = "'Rd, 'Fn, 'IFPFBits";
+ const char *form_fr = "'Fd, 'Rn, 'IFPFBits";
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ case FCVTZS_ws_fixed:
+ case FCVTZS_xs_fixed:
+ case FCVTZS_wd_fixed:
+ case FCVTZS_xd_fixed: mnemonic = "fcvtzs"; break;
+ case FCVTZU_ws_fixed:
+ case FCVTZU_xs_fixed:
+ case FCVTZU_wd_fixed:
+ case FCVTZU_xd_fixed: mnemonic = "fcvtzu"; break;
+ case SCVTF_sw_fixed:
+ case SCVTF_sx_fixed:
+ case SCVTF_dw_fixed:
+ case SCVTF_dx_fixed: mnemonic = "scvtf"; form = form_fr; break;
+ case UCVTF_sw_fixed:
+ case UCVTF_sx_fixed:
+ case UCVTF_dw_fixed:
+ case UCVTF_dx_fixed: mnemonic = "ucvtf"; form = form_fr; break;
+ default: VIXL_UNREACHABLE();
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
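+  // For example, the nop and csdb hints handled below share the HINT encoding
+  // and differ only in the CRm:op2 immediate (hint #0 and hint #20), so they
+  // are selected with ImmHint() rather than a dedicated opcode mask.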
+ const char *mnemonic = "unimplemented";
+ const char *form = "(System)";
+
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ mnemonic = "clrex";
+ form = (instr->CRm() == 0xf) ? NULL : "'IX";
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ mnemonic = "mrs";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "'Xt, nzcv"; break;
+ case FPCR: form = "'Xt, fpcr"; break;
+ default: form = "'Xt, (unknown)"; break;
+ }
+ break;
+ }
+ case MSR: {
+ mnemonic = "msr";
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: form = "nzcv, 'Xt"; break;
+ case FPCR: form = "fpcr, 'Xt"; break;
+ default: form = "(unknown), 'Xt"; break;
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ switch (instr->ImmHint()) {
+ case NOP: {
+ mnemonic = "nop";
+ form = NULL;
+ break;
+ }
+ case CSDB: {
+ mnemonic = "csdb";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ switch (instr->Mask(MemBarrierMask)) {
+ case DMB: {
+ mnemonic = "dmb";
+ form = "'M";
+ break;
+ }
+ case DSB: {
+ mnemonic = "dsb";
+ form = "'M";
+ break;
+ }
+ case ISB: {
+ mnemonic = "isb";
+ form = NULL;
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysFMask) == SystemSysFixed) {
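+    // The ic/dc cache maintenance operations recognised here are aliases of
+    // SYS; any other SysOp falls back to the generic "sys" form.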
+ switch (instr->SysOp()) {
+ case IVAU:
+ mnemonic = "ic";
+ form = "ivau, 'Xt";
+ break;
+ case CVAC:
+ mnemonic = "dc";
+ form = "cvac, 'Xt";
+ break;
+ case CVAU:
+ mnemonic = "dc";
+ form = "cvau, 'Xt";
+ break;
+ case CIVAC:
+ mnemonic = "dc";
+ form = "civac, 'Xt";
+ break;
+ case ZVA:
+ mnemonic = "dc";
+ form = "zva, 'Xt";
+ break;
+ default:
+ mnemonic = "sys";
+ if (instr->Rt() == 31) {
+ form = "'G1, 'Kn, 'Km, 'G2";
+ } else {
+ form = "'G1, 'Kn, 'Km, 'G2, 'Xt";
+ }
+ break;
+ }
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitException(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'IDebug";
+
+ switch (instr->Mask(ExceptionMask)) {
+ case HLT: mnemonic = "hlt"; break;
+ case BRK: mnemonic = "brk"; break;
+ case SVC: mnemonic = "svc"; break;
+ case HVC: mnemonic = "hvc"; break;
+ case SMC: mnemonic = "smc"; break;
+ case DCPS1: mnemonic = "dcps1"; form = "{'IDebug}"; break;
+ case DCPS2: mnemonic = "dcps2"; form = "{'IDebug}"; break;
+ case DCPS3: mnemonic = "dcps3"; form = "{'IDebug}"; break;
+ default: form = "(Exception)";
+ }
+ Format(instr, mnemonic, form);
+}
+
+
+void Disassembler::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Disassembler::VisitNEON2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s";
+ const char *form_cmp_zero = "'Vd.%s, 'Vn.%s, #0";
+ const char *form_fcmp_zero = "'Vd.%s, 'Vn.%s, #0.0";
+ NEONFormatDecoder nfd(instr);
+
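+  // size:Q -> destination arrangement for the long pairwise forms (saddlp,
+  // uaddlp, sadalp, uadalp). Illustrative example: size = 00, Q = 1 gives
+  // "saddlp v0.8h, v1.16b".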
+ static const NEONFormatMap map_lp_ta = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_ta = {
+ {22}, {NF_4S, NF_2D}
+ };
+
+ static const NEONFormatMap map_cvt_tb = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: mnemonic = "rev64"; break;
+ case NEON_REV32: mnemonic = "rev32"; break;
+ case NEON_REV16: mnemonic = "rev16"; break;
+ case NEON_SADDLP:
+ mnemonic = "saddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADDLP:
+ mnemonic = "uaddlp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SUQADD: mnemonic = "suqadd"; break;
+ case NEON_USQADD: mnemonic = "usqadd"; break;
+ case NEON_CLS: mnemonic = "cls"; break;
+ case NEON_CLZ: mnemonic = "clz"; break;
+ case NEON_CNT: mnemonic = "cnt"; break;
+ case NEON_SADALP:
+ mnemonic = "sadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_UADALP:
+ mnemonic = "uadalp";
+ nfd.SetFormatMap(0, &map_lp_ta);
+ break;
+ case NEON_SQABS: mnemonic = "sqabs"; break;
+ case NEON_SQNEG: mnemonic = "sqneg"; break;
+ case NEON_CMGT_zero: mnemonic = "cmgt"; form = form_cmp_zero; break;
+ case NEON_CMGE_zero: mnemonic = "cmge"; form = form_cmp_zero; break;
+ case NEON_CMEQ_zero: mnemonic = "cmeq"; form = form_cmp_zero; break;
+ case NEON_CMLE_zero: mnemonic = "cmle"; form = form_cmp_zero; break;
+ case NEON_CMLT_zero: mnemonic = "cmlt"; form = form_cmp_zero; break;
+ case NEON_ABS: mnemonic = "abs"; break;
+ case NEON_NEG: mnemonic = "neg"; break;
+ case NEON_RBIT_NOT:
+ switch (instr->FPType()) {
+ case 0: mnemonic = "mvn"; break;
+ case 1: mnemonic = "rbit"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ break;
+ }
+ } else {
+ // These instructions all use a one bit size field, except XTN, SQXTUN,
+ // SHLL, SQXTN and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: mnemonic = "fabs"; break;
+ case NEON_FNEG: mnemonic = "fneg"; break;
+ case NEON_FCVTN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtn2" : "fcvtn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTXN:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtxn2" : "fcvtxn";
+ nfd.SetFormatMap(0, &map_cvt_tb);
+ nfd.SetFormatMap(1, &map_cvt_ta);
+ break;
+ case NEON_FCVTL:
+ mnemonic = instr->Mask(NEON_Q) ? "fcvtl2" : "fcvtl";
+ nfd.SetFormatMap(0, &map_cvt_ta);
+ nfd.SetFormatMap(1, &map_cvt_tb);
+ break;
+ case NEON_FRINTN: mnemonic = "frintn"; break;
+ case NEON_FRINTA: mnemonic = "frinta"; break;
+ case NEON_FRINTP: mnemonic = "frintp"; break;
+ case NEON_FRINTM: mnemonic = "frintm"; break;
+ case NEON_FRINTX: mnemonic = "frintx"; break;
+ case NEON_FRINTZ: mnemonic = "frintz"; break;
+ case NEON_FRINTI: mnemonic = "frinti"; break;
+ case NEON_FCVTNS: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU: mnemonic = "fcvtau"; break;
+ case NEON_FSQRT: mnemonic = "fsqrt"; break;
+ case NEON_SCVTF: mnemonic = "scvtf"; break;
+ case NEON_UCVTF: mnemonic = "ucvtf"; break;
+ case NEON_URSQRTE: mnemonic = "ursqrte"; break;
+ case NEON_URECPE: mnemonic = "urecpe"; break;
+ case NEON_FRSQRTE: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE: mnemonic = "frecpe"; break;
+ case NEON_FCMGT_zero: mnemonic = "fcmgt"; form = form_fcmp_zero; break;
+ case NEON_FCMGE_zero: mnemonic = "fcmge"; form = form_fcmp_zero; break;
+ case NEON_FCMEQ_zero: mnemonic = "fcmeq"; form = form_fcmp_zero; break;
+ case NEON_FCMLE_zero: mnemonic = "fcmle"; form = form_fcmp_zero; break;
+ case NEON_FCMLT_zero: mnemonic = "fcmlt"; form = form_fcmp_zero; break;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: mnemonic = "xtn"; break;
+ case NEON_SQXTN: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN: mnemonic = "sqxtun"; break;
+ case NEON_SHLL:
+ mnemonic = "shll";
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(1, nfd.IntegerFormatMap());
+ switch (instr->NEONSize()) {
+ case 0: form = "'Vd.%s, 'Vn.%s, #8"; break;
+ case 1: form = "'Vd.%s, 'Vn.%s, #16"; break;
+ case 2: form = "'Vd.%s, 'Vn.%s, #32"; break;
+ default: form = "(NEON2RegMisc)";
+ }
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ return;
+ } else {
+ form = "(NEON2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: mnemonic = "and"; break;
+ case NEON_ORR:
+ mnemonic = "orr";
+ if (instr->Rm() == instr->Rn()) {
+ mnemonic = "mov";
+ form = "'Vd.%s, 'Vn.%s";
+ }
+ break;
+ case NEON_ORN: mnemonic = "orn"; break;
+ case NEON_EOR: mnemonic = "eor"; break;
+ case NEON_BIC: mnemonic = "bic"; break;
+ case NEON_BIF: mnemonic = "bif"; break;
+ case NEON_BIT: mnemonic = "bit"; break;
+ case NEON_BSL: mnemonic = "bsl"; break;
+ default: form = "(NEON3Same)";
+ }
+ nfd.SetFormatMaps(nfd.LogicalFormatMap());
+ } else {
+ static const char *mnemonics[] = {
+ "shadd", "uhadd", "shadd", "uhadd",
+ "sqadd", "uqadd", "sqadd", "uqadd",
+ "srhadd", "urhadd", "srhadd", "urhadd",
+ NULL, NULL, NULL, NULL, // Handled by logical cases above.
+ "shsub", "uhsub", "shsub", "uhsub",
+ "sqsub", "uqsub", "sqsub", "uqsub",
+ "cmgt", "cmhi", "cmgt", "cmhi",
+ "cmge", "cmhs", "cmge", "cmhs",
+ "sshl", "ushl", "sshl", "ushl",
+ "sqshl", "uqshl", "sqshl", "uqshl",
+ "srshl", "urshl", "srshl", "urshl",
+ "sqrshl", "uqrshl", "sqrshl", "uqrshl",
+ "smax", "umax", "smax", "umax",
+ "smin", "umin", "smin", "umin",
+ "sabd", "uabd", "sabd", "uabd",
+ "saba", "uaba", "saba", "uaba",
+ "add", "sub", "add", "sub",
+ "cmtst", "cmeq", "cmtst", "cmeq",
+ "mla", "mls", "mla", "mls",
+ "mul", "pmul", "mul", "pmul",
+ "smaxp", "umaxp", "smaxp", "umaxp",
+ "sminp", "uminp", "sminp", "uminp",
+ "sqdmulh", "sqrdmulh", "sqdmulh", "sqrdmulh",
+ "addp", "unallocated", "addp", "unallocated",
+ "fmaxnm", "fmaxnmp", "fminnm", "fminnmp",
+ "fmla", "unallocated", "fmls", "unallocated",
+ "fadd", "faddp", "fsub", "fabd",
+ "fmulx", "fmul", "unallocated", "unallocated",
+ "fcmeq", "fcmge", "unallocated", "fcmgt",
+ "unallocated", "facge", "unallocated", "facgt",
+ "fmax", "fmaxp", "fmin", "fminp",
+ "frecps", "fdiv", "frsqrts", "unallocated"};
+
+ // Operation is determined by the opcode bits (15-11), the top bit of
+ // size (23) and the U bit (29).
+ unsigned index = (instr->Bits(15, 11) << 2) | (instr->Bit(23) << 1) |
+ instr->Bit(29);
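+    // Illustrative example: ADD (vector) with a byte arrangement has
+    // opcode<15:11> = 0b10000, bit 23 = 0 and U = 0, so index = 16 << 2 = 64
+    // ("add"); setting U (bit 29) gives index 65 ("sub").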
+ VIXL_ASSERT(index < (sizeof(mnemonics) / sizeof(mnemonics[0])));
+ mnemonic = mnemonics[index];
+ // Assert that index is not one of the previously handled logical
+ // instructions.
+ VIXL_ASSERT(mnemonic != NULL);
+
+ if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPFormatMap());
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEON3Different(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+
+ NEONFormatDecoder nfd(instr);
+ nfd.SetFormatMap(0, nfd.LongIntegerFormatMap());
+
+ // Ignore the Q bit. Appending a "2" suffix is handled later.
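+  // Illustrative example: "saddl v0.8h, v1.8b, v2.8b" operates on the lower
+  // halves, while the Q = 1 form "saddl2 v0.8h, v1.16b, v2.16b" gets its "2"
+  // suffix from nfd.Mnemonic() below.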
+ switch (instr->Mask(NEON3DifferentMask) & ~NEON_Q) {
+ case NEON_PMULL: mnemonic = "pmull"; break;
+ case NEON_SABAL: mnemonic = "sabal"; break;
+ case NEON_SABDL: mnemonic = "sabdl"; break;
+ case NEON_SADDL: mnemonic = "saddl"; break;
+ case NEON_SMLAL: mnemonic = "smlal"; break;
+ case NEON_SMLSL: mnemonic = "smlsl"; break;
+ case NEON_SMULL: mnemonic = "smull"; break;
+ case NEON_SSUBL: mnemonic = "ssubl"; break;
+ case NEON_SQDMLAL: mnemonic = "sqdmlal"; break;
+ case NEON_SQDMLSL: mnemonic = "sqdmlsl"; break;
+ case NEON_SQDMULL: mnemonic = "sqdmull"; break;
+ case NEON_UABAL: mnemonic = "uabal"; break;
+ case NEON_UABDL: mnemonic = "uabdl"; break;
+ case NEON_UADDL: mnemonic = "uaddl"; break;
+ case NEON_UMLAL: mnemonic = "umlal"; break;
+ case NEON_UMLSL: mnemonic = "umlsl"; break;
+ case NEON_UMULL: mnemonic = "umull"; break;
+ case NEON_USUBL: mnemonic = "usubl"; break;
+ case NEON_SADDW:
+ mnemonic = "saddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_SSUBW:
+ mnemonic = "ssubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_UADDW:
+ mnemonic = "uaddw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_USUBW:
+ mnemonic = "usubw";
+ nfd.SetFormatMap(1, nfd.LongIntegerFormatMap());
+ break;
+ case NEON_ADDHN:
+ mnemonic = "addhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RADDHN:
+ mnemonic = "raddhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_RSUBHN:
+ mnemonic = "rsubhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ case NEON_SUBHN:
+ mnemonic = "subhn";
+ nfd.SetFormatMaps(nfd.LongIntegerFormatMap());
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ break;
+ default: form = "(NEON3Different)";
+ }
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONAcrossLanes(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap(),
+ NEONFormatDecoder::IntegerFormatMap());
+
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.FPFormatMap());
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: mnemonic = "fmaxv"; break;
+ case NEON_FMINV: mnemonic = "fminv"; break;
+ case NEON_FMAXNMV: mnemonic = "fmaxnmv"; break;
+ case NEON_FMINNMV: mnemonic = "fminnmv"; break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ } else if (instr->Mask(NEONAcrossLanesFMask) == NEONAcrossLanesFixed) {
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: mnemonic = "addv"; break;
+ case NEON_SMAXV: mnemonic = "smaxv"; break;
+ case NEON_SMINV: mnemonic = "sminv"; break;
+ case NEON_UMAXV: mnemonic = "umaxv"; break;
+ case NEON_UMINV: mnemonic = "uminv"; break;
+ case NEON_SADDLV:
+ mnemonic = "saddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ case NEON_UADDLV:
+ mnemonic = "uaddlv";
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ break;
+ default: form = "(NEONAcrossLanes)"; break;
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ bool l_instr = false;
+ bool fp_instr = false;
+
+ const char *form = "'Vd.%s, 'Vn.%s, 'Ve.%s['IVByElemIndex]";
+
+ static const NEONFormatMap map_ta = {
+ {23, 22}, {NF_UNDEF, NF_4S, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map_ta,
+ NEONFormatDecoder::IntegerFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_SMULL_byelement: mnemonic = "smull"; l_instr = true; break;
+ case NEON_UMULL_byelement: mnemonic = "umull"; l_instr = true; break;
+ case NEON_SMLAL_byelement: mnemonic = "smlal"; l_instr = true; break;
+ case NEON_UMLAL_byelement: mnemonic = "umlal"; l_instr = true; break;
+ case NEON_SMLSL_byelement: mnemonic = "smlsl"; l_instr = true; break;
+ case NEON_UMLSL_byelement: mnemonic = "umlsl"; l_instr = true; break;
+ case NEON_SQDMULL_byelement: mnemonic = "sqdmull"; l_instr = true; break;
+ case NEON_SQDMLAL_byelement: mnemonic = "sqdmlal"; l_instr = true; break;
+ case NEON_SQDMLSL_byelement: mnemonic = "sqdmlsl"; l_instr = true; break;
+ case NEON_MUL_byelement: mnemonic = "mul"; break;
+ case NEON_MLA_byelement: mnemonic = "mla"; break;
+ case NEON_MLS_byelement: mnemonic = "mls"; break;
+ case NEON_SQDMULH_byelement: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_byelement: mnemonic = "sqrdmulh"; break;
+ default:
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: mnemonic = "fmul"; fp_instr = true; break;
+ case NEON_FMLA_byelement: mnemonic = "fmla"; fp_instr = true; break;
+ case NEON_FMLS_byelement: mnemonic = "fmls"; fp_instr = true; break;
+ case NEON_FMULX_byelement: mnemonic = "fmulx"; fp_instr = true; break;
+ }
+ }
+
+ if (l_instr) {
+ Format(instr, nfd.Mnemonic(mnemonic), nfd.Substitute(form));
+ } else if (fp_instr) {
+ nfd.SetFormatMap(0, nfd.FPFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ } else {
+ nfd.SetFormatMap(0, nfd.IntegerFormatMap());
+ Format(instr, mnemonic, nfd.Substitute(form));
+ }
+}
+
+
+void Disassembler::VisitNEONCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap(),
+ NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Vd.%s['IVInsIndex1], 'Vn.%s['IVInsIndex2]";
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ mnemonic = "mov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Vd.%s['IVInsIndex1], 'Xn";
+ } else {
+ form = "'Vd.%s['IVInsIndex1], 'Wn";
+ }
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
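+    // "mov" is the preferred alias of "umov" for 32-bit (imm5<2:0> == 0b100)
+    // and 64-bit (Q == 1) elements.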
+ if (instr->Mask(NEON_Q) || ((instr->ImmNEON5() & 7) == 4)) {
+ mnemonic = "mov";
+ } else {
+ mnemonic = "umov";
+ }
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ if (nfd.GetVectorFormat() == kFormatD) {
+ form = "'Xd, 'Vn.%s['IVInsIndex1]";
+ } else {
+ form = "'Wd, 'Vn.%s['IVInsIndex1]";
+ }
+ } else if (instr->Mask(NEONCopySmovMask) == NEON_SMOV) {
+ mnemonic = "smov";
+ nfd.SetFormatMap(0, nfd.TriangularScalarFormatMap());
+ form = "'Rdq, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ mnemonic = "dup";
+ form = "'Vd.%s, 'Vn.%s['IVInsIndex1]";
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ mnemonic = "dup";
+ if (nfd.GetVectorFormat() == kFormat2D) {
+ form = "'Vd.%s, 'Xn";
+ } else {
+ form = "'Vd.%s, 'Wn";
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONExtract(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONExtract)";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ mnemonic = "ext";
+ form = "'Vd.%s, 'Vn.%s, 'Vm.%s, 'IVExtract";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStruct)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns]";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns]";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns]";
+ const char *form_4v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructMask)) {
+ case NEON_LD1_1v: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreMultiStructPostIndex)";
+ const char *form_1v = "{'Vt.%1$s}, ['Xns], 'Xmr1";
+ const char *form_2v = "{'Vt.%1$s, 'Vt2.%1$s}, ['Xns], 'Xmr2";
+ const char *form_3v = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s}, ['Xns], 'Xmr3";
+ const char *form_4v =
+ "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmr4";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_1v_post: mnemonic = "ld1"; form = form_1v; break;
+ case NEON_LD1_2v_post: mnemonic = "ld1"; form = form_2v; break;
+ case NEON_LD1_3v_post: mnemonic = "ld1"; form = form_3v; break;
+ case NEON_LD1_4v_post: mnemonic = "ld1"; form = form_4v; break;
+ case NEON_LD2_post: mnemonic = "ld2"; form = form_2v; break;
+ case NEON_LD3_post: mnemonic = "ld3"; form = form_3v; break;
+ case NEON_LD4_post: mnemonic = "ld4"; form = form_4v; break;
+ case NEON_ST1_1v_post: mnemonic = "st1"; form = form_1v; break;
+ case NEON_ST1_2v_post: mnemonic = "st1"; form = form_2v; break;
+ case NEON_ST1_3v_post: mnemonic = "st1"; form = form_3v; break;
+ case NEON_ST1_4v_post: mnemonic = "st1"; form = form_4v; break;
+ case NEON_ST2_post: mnemonic = "st2"; form = form_2v; break;
+ case NEON_ST3_post: mnemonic = "st3"; form = form_3v; break;
+ case NEON_ST4_post: mnemonic = "st4"; form = form_4v; break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStruct)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns]";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns]";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns]";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructMask)) {
+ case NEON_LD1_b: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns]";
+ break;
+ case NEON_LD2_b:
+ case NEON_ST2_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD2_h:
+ case NEON_ST2_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD2_s:
+ case NEON_ST2_s:
+ VIXL_STATIC_ASSERT((NEON_ST2_s | (1 << NEONLSSize_offset)) == NEON_ST2_d);
+ VIXL_STATIC_ASSERT((NEON_LD2_s | (1 << NEONLSSize_offset)) == NEON_LD2_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD2R:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns]";
+ break;
+ case NEON_LD3_b:
+ case NEON_ST3_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD3_h:
+ case NEON_ST3_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD3_s:
+ case NEON_ST3_s:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD3R:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns]";
+ break;
+ case NEON_LD4_b:
+ case NEON_ST4_b:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns]";
+ break;
+ case NEON_LD4_h:
+ case NEON_ST4_h:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns]";
+ break;
+ case NEON_LD4_s:
+ case NEON_ST4_s:
+ VIXL_STATIC_ASSERT((NEON_LD4_s | (1 << NEONLSSize_offset)) == NEON_LD4_d);
+ VIXL_STATIC_ASSERT((NEON_ST4_s | (1 << NEONLSSize_offset)) == NEON_ST4_d);
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns]";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns]";
+ break;
+ case NEON_LD4R:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns]";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONLoadStoreSingleStructPostIndex)";
+
+ const char *form_1b = "{'Vt.b}['IVLSLane0], ['Xns], 'Xmb1";
+ const char *form_1h = "{'Vt.h}['IVLSLane1], ['Xns], 'Xmb2";
+ const char *form_1s = "{'Vt.s}['IVLSLane2], ['Xns], 'Xmb4";
+ const char *form_1d = "{'Vt.d}['IVLSLane3], ['Xns], 'Xmb8";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b_post: mnemonic = "ld1"; form = form_1b; break;
+ case NEON_LD1_h_post: mnemonic = "ld1"; form = form_1h; break;
+ case NEON_LD1_s_post:
+ mnemonic = "ld1";
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_ST1_b_post: mnemonic = "st1"; form = form_1b; break;
+ case NEON_ST1_h_post: mnemonic = "st1"; form = form_1h; break;
+ case NEON_ST1_s_post:
+ mnemonic = "st1";
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ form = ((instr->NEONLSSize() & 1) == 0) ? form_1s : form_1d;
+ break;
+ case NEON_LD1R_post:
+ mnemonic = "ld1r";
+ form = "{'Vt.%s}, ['Xns], 'Xmz1";
+ break;
+ case NEON_LD2_b_post:
+ case NEON_ST2_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.b, 'Vt2.b}['IVLSLane0], ['Xns], 'Xmb2";
+ break;
+ case NEON_ST2_h_post:
+ case NEON_LD2_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ form = "{'Vt.h, 'Vt2.h}['IVLSLane1], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD2_s_post:
+ case NEON_ST2_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld2" : "st2";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s}['IVLSLane2], ['Xns], 'Xmb8";
+ else
+ form = "{'Vt.d, 'Vt2.d}['IVLSLane3], ['Xns], 'Xmb16";
+ break;
+ case NEON_LD2R_post:
+ mnemonic = "ld2r";
+ form = "{'Vt.%s, 'Vt2.%s}, ['Xns], 'Xmz2";
+ break;
+ case NEON_LD3_b_post:
+ case NEON_ST3_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b}['IVLSLane0], ['Xns], 'Xmb3";
+ break;
+ case NEON_LD3_h_post:
+ case NEON_ST3_h_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h}['IVLSLane1], ['Xns], 'Xmb6";
+ break;
+ case NEON_LD3_s_post:
+ case NEON_ST3_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld3" : "st3";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s}['IVLSLane2], ['Xns], 'Xmb12";
+ else
+        form = "{'Vt.d, 'Vt2.d, 'Vt3.d}['IVLSLane3], ['Xns], 'Xmb24";
+ break;
+ case NEON_LD3R_post:
+ mnemonic = "ld3r";
+ form = "{'Vt.%s, 'Vt2.%s, 'Vt3.%s}, ['Xns], 'Xmz3";
+ break;
+ case NEON_LD4_b_post:
+ case NEON_ST4_b_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.b, 'Vt2.b, 'Vt3.b, 'Vt4.b}['IVLSLane0], ['Xns], 'Xmb4";
+ break;
+ case NEON_LD4_h_post:
+ case NEON_ST4_h_post:
+      mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ form = "{'Vt.h, 'Vt2.h, 'Vt3.h, 'Vt4.h}['IVLSLane1], ['Xns], 'Xmb8";
+ break;
+ case NEON_LD4_s_post:
+ case NEON_ST4_s_post:
+ mnemonic = (instr->LdStXLoad() == 1) ? "ld4" : "st4";
+ if ((instr->NEONLSSize() & 1) == 0)
+ form = "{'Vt.s, 'Vt2.s, 'Vt3.s, 'Vt4.s}['IVLSLane2], ['Xns], 'Xmb16";
+ else
+ form = "{'Vt.d, 'Vt2.d, 'Vt3.d, 'Vt4.d}['IVLSLane3], ['Xns], 'Xmb32";
+ break;
+ case NEON_LD4R_post:
+ mnemonic = "ld4r";
+ form = "{'Vt.%1$s, 'Vt2.%1$s, 'Vt3.%1$s, 'Vt4.%1$s}, ['Xns], 'Xmz4";
+ break;
+ default: break;
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONModifiedImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vt.%s, 'IVMIImm8, lsl 'IVMIShiftAmt1";
+
+ int cmode = instr->NEONCmode();
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op = instr->NEONModImmOp();
+
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ static const NEONFormatMap map_h = { {30}, {NF_4H, NF_8H} };
+ static const NEONFormatMap map_s = { {30}, {NF_2S, NF_4S} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
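+  // cmode selects the element size and shift style: 0xx0/0xx1 -> 32-bit lsl,
+  // 10x0/10x1 -> 16-bit lsl, 110x -> 32-bit msl, 1110 -> 8-bit or 64-bit movi
+  // (depending on op), 1111 -> fmov immediate.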
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<3> == '1'.
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ } else { // cmode<0> == '1'.
+ mnemonic = (op == 1) ? "bic" : "orr";
+ }
+ nfd.SetFormatMap(0, &map_h);
+ } else { // cmode<2> == '1'.
+ if (cmode_1 == 0) {
+ mnemonic = (op == 1) ? "mvni" : "movi";
+ form = "'Vt.%s, 'IVMIImm8, msl 'IVMIShiftAmt2";
+ nfd.SetFormatMap(0, &map_s);
+ } else { // cmode<1> == '1'.
+ if (cmode_0 == 0) {
+ mnemonic = "movi";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImm8";
+ } else {
+ form = (q == 0) ? "'Dd, 'IVMIImm" : "'Vt.2d, 'IVMIImm";
+ }
+ } else { // cmode<0> == '1'
+ mnemonic = "fmov";
+ if (op == 0) {
+ form = "'Vt.%s, 'IVMIImmFPSingle";
+ nfd.SetFormatMap(0, &map_s);
+ } else {
+ if (q == 1) {
+ form = "'Vt.2d, 'IVMIImmFPDouble";
+ }
+ }
+ }
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn";
+ const char *form_0 = "%sd, %sn, #0";
+ const char *form_fp0 = "%sd, %sn, #0.0";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMGT_zero_scalar: mnemonic = "cmgt"; form = form_0; break;
+ case NEON_CMGE_zero_scalar: mnemonic = "cmge"; form = form_0; break;
+ case NEON_CMLE_zero_scalar: mnemonic = "cmle"; form = form_0; break;
+ case NEON_CMLT_zero_scalar: mnemonic = "cmlt"; form = form_0; break;
+ case NEON_CMEQ_zero_scalar: mnemonic = "cmeq"; form = form_0; break;
+ case NEON_NEG_scalar: mnemonic = "neg"; break;
+ case NEON_SQNEG_scalar: mnemonic = "sqneg"; break;
+ case NEON_ABS_scalar: mnemonic = "abs"; break;
+ case NEON_SQABS_scalar: mnemonic = "sqabs"; break;
+ case NEON_SUQADD_scalar: mnemonic = "suqadd"; break;
+ case NEON_USQADD_scalar: mnemonic = "usqadd"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ } else {
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRSQRTE_scalar: mnemonic = "frsqrte"; break;
+ case NEON_FRECPE_scalar: mnemonic = "frecpe"; break;
+ case NEON_SCVTF_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_scalar: mnemonic = "ucvtf"; break;
+ case NEON_FCMGT_zero_scalar: mnemonic = "fcmgt"; form = form_fp0; break;
+ case NEON_FCMGE_zero_scalar: mnemonic = "fcmge"; form = form_fp0; break;
+ case NEON_FCMLE_zero_scalar: mnemonic = "fcmle"; form = form_fp0; break;
+ case NEON_FCMLT_zero_scalar: mnemonic = "fcmlt"; form = form_fp0; break;
+ case NEON_FCMEQ_zero_scalar: mnemonic = "fcmeq"; form = form_fp0; break;
+ case NEON_FRECPX_scalar: mnemonic = "frecpx"; break;
+ case NEON_FCVTNS_scalar: mnemonic = "fcvtns"; break;
+ case NEON_FCVTNU_scalar: mnemonic = "fcvtnu"; break;
+ case NEON_FCVTPS_scalar: mnemonic = "fcvtps"; break;
+ case NEON_FCVTPU_scalar: mnemonic = "fcvtpu"; break;
+ case NEON_FCVTMS_scalar: mnemonic = "fcvtms"; break;
+ case NEON_FCVTMU_scalar: mnemonic = "fcvtmu"; break;
+ case NEON_FCVTZS_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_FCVTZU_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTAS_scalar: mnemonic = "fcvtas"; break;
+ case NEON_FCVTAU_scalar: mnemonic = "fcvtau"; break;
+ case NEON_FCVTXN_scalar:
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ mnemonic = "fcvtxn";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.ScalarFormatMap());
+ nfd.SetFormatMap(1, nfd.LongScalarFormatMap());
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: mnemonic = "sqxtn"; break;
+ case NEON_UQXTN_scalar: mnemonic = "uqxtn"; break;
+ case NEON_SQXTUN_scalar: mnemonic = "sqxtun"; break;
+ default: form = "(NEONScalar2RegMisc)";
+ }
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Diff(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap(),
+ NEONFormatDecoder::ScalarFormatMap());
+
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+    case NEON_SQDMLAL_scalar: mnemonic = "sqdmlal"; break;
+    case NEON_SQDMLSL_scalar: mnemonic = "sqdmlsl"; break;
+    case NEON_SQDMULL_scalar: mnemonic = "sqdmull"; break;
+ default: form = "(NEONScalar3Diff)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalar3Same(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, %sm";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ nfd.SetFormatMaps(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FACGE_scalar: mnemonic = "facge"; break;
+ case NEON_FACGT_scalar: mnemonic = "facgt"; break;
+ case NEON_FCMEQ_scalar: mnemonic = "fcmeq"; break;
+ case NEON_FCMGE_scalar: mnemonic = "fcmge"; break;
+ case NEON_FCMGT_scalar: mnemonic = "fcmgt"; break;
+ case NEON_FMULX_scalar: mnemonic = "fmulx"; break;
+ case NEON_FRECPS_scalar: mnemonic = "frecps"; break;
+ case NEON_FRSQRTS_scalar: mnemonic = "frsqrts"; break;
+ case NEON_FABD_scalar: mnemonic = "fabd"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: mnemonic = "add"; break;
+ case NEON_SUB_scalar: mnemonic = "sub"; break;
+ case NEON_CMEQ_scalar: mnemonic = "cmeq"; break;
+ case NEON_CMGE_scalar: mnemonic = "cmge"; break;
+ case NEON_CMGT_scalar: mnemonic = "cmgt"; break;
+ case NEON_CMHI_scalar: mnemonic = "cmhi"; break;
+ case NEON_CMHS_scalar: mnemonic = "cmhs"; break;
+ case NEON_CMTST_scalar: mnemonic = "cmtst"; break;
+ case NEON_UQADD_scalar: mnemonic = "uqadd"; break;
+ case NEON_SQADD_scalar: mnemonic = "sqadd"; break;
+ case NEON_UQSUB_scalar: mnemonic = "uqsub"; break;
+ case NEON_SQSUB_scalar: mnemonic = "sqsub"; break;
+ case NEON_USHL_scalar: mnemonic = "ushl"; break;
+ case NEON_SSHL_scalar: mnemonic = "sshl"; break;
+ case NEON_UQSHL_scalar: mnemonic = "uqshl"; break;
+ case NEON_SQSHL_scalar: mnemonic = "sqshl"; break;
+ case NEON_URSHL_scalar: mnemonic = "urshl"; break;
+ case NEON_SRSHL_scalar: mnemonic = "srshl"; break;
+ case NEON_UQRSHL_scalar: mnemonic = "uqrshl"; break;
+ case NEON_SQRSHL_scalar: mnemonic = "sqrshl"; break;
+ case NEON_SQDMULH_scalar: mnemonic = "sqdmulh"; break;
+ case NEON_SQRDMULH_scalar: mnemonic = "sqrdmulh"; break;
+ default: form = "(NEONScalar3Same)";
+ }
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Ve.%s['IVByElemIndex]";
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ bool long_instr = false;
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar:
+ mnemonic = "sqdmull";
+ long_instr = true;
+ break;
+ case NEON_SQDMLAL_byelement_scalar:
+ mnemonic = "sqdmlal";
+ long_instr = true;
+ break;
+ case NEON_SQDMLSL_byelement_scalar:
+ mnemonic = "sqdmlsl";
+ long_instr = true;
+ break;
+ case NEON_SQDMULH_byelement_scalar:
+ mnemonic = "sqdmulh";
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ mnemonic = "sqrdmulh";
+ break;
+ default:
+ nfd.SetFormatMap(0, nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: mnemonic = "fmul"; break;
+ case NEON_FMLA_byelement_scalar: mnemonic = "fmla"; break;
+ case NEON_FMLS_byelement_scalar: mnemonic = "fmls"; break;
+ case NEON_FMULX_byelement_scalar: mnemonic = "fmulx"; break;
+ default: form = "(NEONScalarByIndexedElement)";
+ }
+ }
+
+ if (long_instr) {
+ nfd.SetFormatMap(0, nfd.LongScalarFormatMap());
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(
+ form, nfd.kPlaceholder, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarCopy(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONScalarCopy)";
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ mnemonic = "mov";
+ form = "%sd, 'Vn.%s['IVInsIndex1]";
+ }
+
+ Format(instr, mnemonic, nfd.Substitute(form, nfd.kPlaceholder, nfd.kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarPairwise(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, 'Vn.%s";
+ NEONFormatMap map = { {22}, {NF_2S, NF_2D} };
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap(), &map);
+
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: mnemonic = "addp"; break;
+ case NEON_FADDP_scalar: mnemonic = "faddp"; break;
+ case NEON_FMAXP_scalar: mnemonic = "fmaxp"; break;
+ case NEON_FMAXNMP_scalar: mnemonic = "fmaxnmp"; break;
+ case NEON_FMINP_scalar: mnemonic = "fminp"; break;
+ case NEON_FMINNMP_scalar: mnemonic = "fminnmp"; break;
+ default: form = "(NEONScalarPairwise)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form,
+ NEONFormatDecoder::kPlaceholder, NEONFormatDecoder::kFormat));
+}
+
+
+void Disassembler::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "%sd, %sn, 'Is1";
+ const char *form_2 = "%sd, %sn, 'Is2";
+
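+  // immh<3:0>: 0001 -> B, 001x -> H, 01xx -> S, 1xxx -> D; 0000 is undefined.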
+ static const NEONFormatMap map_shift = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
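+  // immh<2:0>, used by the narrowing forms: 001 -> H, 01x -> S, 1xx -> D.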
+ static const NEONFormatMap map_shift_narrow = {
+ {21, 20, 19},
+ {NF_UNDEF, NF_H, NF_S, NF_S, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map_shift);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_FCVTZU_imm_scalar: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm_scalar: mnemonic = "fcvtzs"; break;
+ case NEON_SCVTF_imm_scalar: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm_scalar: mnemonic = "ucvtf"; break;
+ case NEON_SRI_scalar: mnemonic = "sri"; break;
+ case NEON_SSHR_scalar: mnemonic = "sshr"; break;
+ case NEON_USHR_scalar: mnemonic = "ushr"; break;
+ case NEON_SRSHR_scalar: mnemonic = "srshr"; break;
+ case NEON_URSHR_scalar: mnemonic = "urshr"; break;
+ case NEON_SSRA_scalar: mnemonic = "ssra"; break;
+ case NEON_USRA_scalar: mnemonic = "usra"; break;
+ case NEON_SRSRA_scalar: mnemonic = "srsra"; break;
+ case NEON_URSRA_scalar: mnemonic = "ursra"; break;
+ case NEON_SHL_scalar: mnemonic = "shl"; form = form_2; break;
+ case NEON_SLI_scalar: mnemonic = "sli"; form = form_2; break;
+ case NEON_SQSHLU_scalar: mnemonic = "sqshlu"; form = form_2; break;
+ case NEON_SQSHL_imm_scalar: mnemonic = "sqshl"; form = form_2; break;
+ case NEON_UQSHL_imm_scalar: mnemonic = "uqshl"; form = form_2; break;
+ case NEON_UQSHRN_scalar:
+ mnemonic = "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_UQRSHRN_scalar:
+ mnemonic = "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRN_scalar:
+ mnemonic = "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRN_scalar:
+ mnemonic = "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQSHRUN_scalar:
+ mnemonic = "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ case NEON_SQRSHRUN_scalar:
+ mnemonic = "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_narrow);
+ break;
+ default:
+ form = "(NEONScalarShiftImmediate)";
+ }
+ } else {
+ form = "(NEONScalarShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.SubstitutePlaceholders(form));
+}
+
+
+void Disassembler::VisitNEONShiftImmediate(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Is1";
+ const char *form_shift_2 = "'Vd.%s, 'Vn.%s, 'Is2";
+ const char *form_xtl = "'Vd.%s, 'Vn.%s";
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_shift_ta = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map_shift_tb = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+
+ NEONFormatDecoder nfd(instr, &map_shift_tb);
+
+ if (instr->ImmNEONImmh()) { // immh has to be non-zero.
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SQSHLU: mnemonic = "sqshlu"; form = form_shift_2; break;
+ case NEON_SQSHL_imm: mnemonic = "sqshl"; form = form_shift_2; break;
+ case NEON_UQSHL_imm: mnemonic = "uqshl"; form = form_shift_2; break;
+ case NEON_SHL: mnemonic = "shl"; form = form_shift_2; break;
+ case NEON_SLI: mnemonic = "sli"; form = form_shift_2; break;
+ case NEON_SCVTF_imm: mnemonic = "scvtf"; break;
+ case NEON_UCVTF_imm: mnemonic = "ucvtf"; break;
+ case NEON_FCVTZU_imm: mnemonic = "fcvtzu"; break;
+ case NEON_FCVTZS_imm: mnemonic = "fcvtzs"; break;
+ case NEON_SRI: mnemonic = "sri"; break;
+ case NEON_SSHR: mnemonic = "sshr"; break;
+ case NEON_USHR: mnemonic = "ushr"; break;
+ case NEON_SRSHR: mnemonic = "srshr"; break;
+ case NEON_URSHR: mnemonic = "urshr"; break;
+ case NEON_SSRA: mnemonic = "ssra"; break;
+ case NEON_USRA: mnemonic = "usra"; break;
+ case NEON_SRSRA: mnemonic = "srsra"; break;
+ case NEON_URSRA: mnemonic = "ursra"; break;
+ case NEON_SHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "shrn2" : "shrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_RSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "rshrn2" : "rshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqshrn2" : "uqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_UQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "uqrshrn2" : "uqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrn2" : "sqshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrn2" : "sqrshrn";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqshrun2" : "sqshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SQRSHRUN:
+ mnemonic = instr->Mask(NEON_Q) ? "sqrshrun2" : "sqrshrun";
+ nfd.SetFormatMap(1, &map_shift_ta);
+ break;
+ case NEON_SSHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // sxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "sxtl2" : "sxtl";
+ } else { // sshll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "sshll2" : "sshll";
+ }
+ break;
+ case NEON_USHLL:
+ nfd.SetFormatMap(0, &map_shift_ta);
+ if (instr->ImmNEONImmb() == 0 &&
+ CountSetBits(instr->ImmNEONImmh(), 32) == 1) { // uxtl variant.
+ form = form_xtl;
+ mnemonic = instr->Mask(NEON_Q) ? "uxtl2" : "uxtl";
+ } else { // ushll variant.
+ form = form_shift_2;
+ mnemonic = instr->Mask(NEON_Q) ? "ushll2" : "ushll";
+ }
+ break;
+ default: form = "(NEONShiftImmediate)";
+ }
+ } else {
+ form = "(NEONShiftImmediate)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitNEONTable(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "(NEONTable)";
+ const char form_1v[] = "'Vd.%%s, {'Vn.16b}, 'Vm.%%s";
+ const char form_2v[] = "'Vd.%%s, {'Vn.16b, v%d.16b}, 'Vm.%%s";
+ const char form_3v[] = "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ const char form_4v[] =
+ "'Vd.%%s, {'Vn.16b, v%d.16b, v%d.16b, v%d.16b}, 'Vm.%%s";
+ static const NEONFormatMap map_b = { {30}, {NF_8B, NF_16B} };
+ NEONFormatDecoder nfd(instr, &map_b);
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: mnemonic = "tbl"; form = form_1v; break;
+ case NEON_TBL_2v: mnemonic = "tbl"; form = form_2v; break;
+ case NEON_TBL_3v: mnemonic = "tbl"; form = form_3v; break;
+ case NEON_TBL_4v: mnemonic = "tbl"; form = form_4v; break;
+ case NEON_TBX_1v: mnemonic = "tbx"; form = form_1v; break;
+ case NEON_TBX_2v: mnemonic = "tbx"; form = form_2v; break;
+ case NEON_TBX_3v: mnemonic = "tbx"; form = form_3v; break;
+ case NEON_TBX_4v: mnemonic = "tbx"; form = form_4v; break;
+ default: break;
+ }
+
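+ // Expand the extra register slots in the chosen form string. For example,
+ // with Rn = v30 and a two-register list, form_2v expands to
+ // "'Vd.%s, {'Vn.16b, v31.16b}, 'Vm.%s" (register numbers wrap modulo 32).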
+ char re_form[sizeof(form_4v) + 6];
+ int reg_num = instr->Rn();
+ SprintfLiteral(re_form, form,
+ (reg_num + 1) % kNumberOfVRegisters,
+ (reg_num + 2) % kNumberOfVRegisters,
+ (reg_num + 3) % kNumberOfVRegisters);
+
+ Format(instr, mnemonic, nfd.Substitute(re_form));
+}
+
+
+void Disassembler::VisitNEONPerm(const Instruction* instr) {
+ const char *mnemonic = "unimplemented";
+ const char *form = "'Vd.%s, 'Vn.%s, 'Vm.%s";
+ NEONFormatDecoder nfd(instr);
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: mnemonic = "trn1"; break;
+ case NEON_TRN2: mnemonic = "trn2"; break;
+ case NEON_UZP1: mnemonic = "uzp1"; break;
+ case NEON_UZP2: mnemonic = "uzp2"; break;
+ case NEON_ZIP1: mnemonic = "zip1"; break;
+ case NEON_ZIP2: mnemonic = "zip2"; break;
+ default: form = "(NEONPerm)";
+ }
+ Format(instr, mnemonic, nfd.Substitute(form));
+}
+
+
+void Disassembler::VisitUnimplemented(const Instruction* instr) {
+ Format(instr, "unimplemented", "(Unimplemented)");
+}
+
+
+void Disassembler::VisitUnallocated(const Instruction* instr) {
+ Format(instr, "unallocated", "(Unallocated)");
+}
+
+
+void Disassembler::ProcessOutput(const Instruction* /*instr*/) {
+ // The base disassembler does nothing more than disassemble into a buffer.
+}
+
+
+void Disassembler::AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg) {
+ USE(instr);
+ VIXL_ASSERT(reg.IsValid());
+ char reg_char;
+
+ if (reg.IsRegister()) {
+ reg_char = reg.Is64Bits() ? 'x' : 'w';
+ } else {
+ VIXL_ASSERT(reg.IsVRegister());
+ switch (reg.SizeInBits()) {
+ case kBRegSize: reg_char = 'b'; break;
+ case kHRegSize: reg_char = 'h'; break;
+ case kSRegSize: reg_char = 's'; break;
+ case kDRegSize: reg_char = 'd'; break;
+ default:
+ VIXL_ASSERT(reg.Is128Bits());
+ reg_char = 'q';
+ }
+ }
+
+ if (reg.IsVRegister() || !(reg.Aliases(sp) || reg.Aliases(xzr))) {
+ // A core or scalar/vector register: [wx]0 - 30, [bhsdq]0 - 31.
+ AppendToOutput("%c%d", reg_char, reg.code());
+ } else if (reg.Aliases(sp)) {
+ // Disassemble w31/x31 as stack pointer wsp/sp.
+ AppendToOutput("%s", reg.Is64Bits() ? "sp" : "wsp");
+ } else {
+ // Disassemble w31/x31 as zero register wzr/xzr.
+ AppendToOutput("%czr", reg_char);
+ }
+}
+
+
+void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset) {
+ USE(instr);
+ char sign = (offset < 0) ? '-' : '+';
+ AppendToOutput("#%c0x%" PRIx64, sign, std::abs(offset));
+}
+
+
+void Disassembler::AppendAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
+}
+
+
+void Disassembler::AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ AppendAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr) {
+ USE(instr);
+ int64_t rel_addr = CodeRelativeAddress(addr);
+ if (rel_addr >= 0) {
+ AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+ } else {
+ AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+ }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+ const Instruction* instr, const void* addr) {
+ AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+ const Instruction* instr_address) {
+ set_code_address_offset(
+ base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+ return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
+void Disassembler::Format(const Instruction* instr, const char* mnemonic,
+ const char* format) {
+ VIXL_ASSERT(mnemonic != NULL);
+ ResetOutput();
+ uint32_t pos = buffer_pos_;
+ Substitute(instr, mnemonic);
+ if (format != NULL) {
+ uint32_t spaces = buffer_pos_ - pos < 8 ? 8 - (buffer_pos_ - pos) : 1;
+ while (spaces--) {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = ' ';
+ }
+ Substitute(instr, format);
+ }
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_] = 0;
+ ProcessOutput(instr);
+}
+
+
+void Disassembler::Substitute(const Instruction* instr, const char* string) {
+ char chr = *string++;
+ while (chr != '\0') {
+ if (chr == '\'') {
+ string += SubstituteField(instr, string);
+ } else {
+ VIXL_ASSERT(buffer_pos_ < buffer_size_);
+ buffer_[buffer_pos_++] = chr;
+ }
+ chr = *string++;
+ }
+}
+
+
+int Disassembler::SubstituteField(const Instruction* instr,
+ const char* format) {
+ switch (format[0]) {
+ // NB. The remaining substitution prefix characters are: JUZ.
+ case 'R': // Register. X or W, selected by sf bit.
+ case 'F': // FP register. S or D, selected by type field.
+ case 'V': // Vector register, V, vector format.
+ case 'W':
+ case 'X':
+ case 'B':
+ case 'H':
+ case 'S':
+ case 'D':
+ case 'Q': return SubstituteRegisterField(instr, format);
+ case 'I': return SubstituteImmediateField(instr, format);
+ case 'L': return SubstituteLiteralField(instr, format);
+ case 'N': return SubstituteShiftField(instr, format);
+ case 'P': return SubstitutePrefetchField(instr, format);
+ case 'C': return SubstituteConditionField(instr, format);
+ case 'E': return SubstituteExtendField(instr, format);
+ case 'A': return SubstitutePCRelAddressField(instr, format);
+ case 'T': return SubstituteBranchTargetField(instr, format);
+ case 'O': return SubstituteLSRegOffsetField(instr, format);
+ case 'M': return SubstituteBarrierField(instr, format);
+ case 'K': return SubstituteCrField(instr, format);
+ case 'G': return SubstituteSysOpField(instr, format);
+ default: {
+ VIXL_UNREACHABLE();
+ return 1;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteRegisterField(const Instruction* instr,
+ const char* format) {
+ char reg_prefix = format[0];
+ unsigned reg_num = 0;
+ unsigned field_len = 2;
+
+ switch (format[1]) {
+ case 'd':
+ reg_num = instr->Rd();
+ if (format[2] == 'q') {
+ reg_prefix = instr->NEONQ() ? 'X' : 'W';
+ field_len = 3;
+ }
+ break;
+ case 'n': reg_num = instr->Rn(); break;
+ case 'm':
+ reg_num = instr->Rm();
+ switch (format[2]) {
+ // Handle registers tagged with b (bytes), z (instruction), or
+ // r (registers), used for address updates in
+ // NEON load/store instructions.
+ case 'r':
+ case 'b':
+ case 'z': {
+ field_len = 3;
+ char* eimm;
+ int imm = static_cast<int>(strtol(&format[3], &eimm, 10));
+ field_len += eimm - &format[3];
+ if (reg_num == 31) {
+ switch (format[2]) {
+ case 'z':
+ imm *= (1 << instr->NEONLSSize());
+ break;
+ case 'r':
+ imm *= (instr->NEONQ() == 0) ? kDRegSizeInBytes
+ : kQRegSizeInBytes;
+ break;
+ case 'b':
+ break;
+ }
+ AppendToOutput("#%d", imm);
+ return field_len;
+ }
+ break;
+ }
+ }
+ break;
+ case 'e':
+ // This is register Rm, but using a 4-bit specifier. Used in NEON
+ // by-element instructions.
+ reg_num = (instr->Rm() & 0xf);
+ break;
+ case 'a': reg_num = instr->Ra(); break;
+ case 's': reg_num = instr->Rs(); break;
+ case 't':
+ reg_num = instr->Rt();
+ if (format[0] == 'V') {
+ if ((format[2] >= '2') && (format[2] <= '4')) {
+ // Handle consecutive vector register specifiers Vt2, Vt3 and Vt4.
+ reg_num = (reg_num + format[2] - '1') % 32;
+ field_len = 3;
+ }
+ } else {
+ if (format[2] == '2') {
+ // Handle register specifier Rt2.
+ reg_num = instr->Rt2();
+ field_len = 3;
+ }
+ }
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Increase field length for registers tagged as stack.
+ if (format[2] == 's') {
+ field_len = 3;
+ }
+
+ CPURegister::RegisterType reg_type = CPURegister::kRegister;
+ unsigned reg_size = kXRegSize;
+
+ if (reg_prefix == 'R') {
+ reg_prefix = instr->SixtyFourBits() ? 'X' : 'W';
+ } else if (reg_prefix == 'F') {
+ reg_prefix = ((instr->FPType() & 1) == 0) ? 'S' : 'D';
+ }
+
+ switch (reg_prefix) {
+ case 'W':
+ reg_type = CPURegister::kRegister; reg_size = kWRegSize; break;
+ case 'X':
+ reg_type = CPURegister::kRegister; reg_size = kXRegSize; break;
+ case 'B':
+ reg_type = CPURegister::kVRegister; reg_size = kBRegSize; break;
+ case 'H':
+ reg_type = CPURegister::kVRegister; reg_size = kHRegSize; break;
+ case 'S':
+ reg_type = CPURegister::kVRegister; reg_size = kSRegSize; break;
+ case 'D':
+ reg_type = CPURegister::kVRegister; reg_size = kDRegSize; break;
+ case 'Q':
+ reg_type = CPURegister::kVRegister; reg_size = kQRegSize; break;
+ case 'V':
+ AppendToOutput("v%d", reg_num);
+ return field_len;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ if ((reg_type == CPURegister::kRegister) &&
+ (reg_num == kZeroRegCode) && (format[2] == 's')) {
+ reg_num = kSPRegInternalCode;
+ }
+
+ AppendRegisterNameToOutput(instr, CPURegister(reg_num, reg_size, reg_type));
+
+ return field_len;
+}
+
+
+int Disassembler::SubstituteImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'I');
+
+ switch (format[1]) {
+ case 'M': { // IMoveImm, IMoveNeg or IMoveLSL.
+ if (format[5] == 'L') {
+ AppendToOutput("#0x%" PRIx32, instr->ImmMoveWide());
+ if (instr->ShiftMoveWide() > 0) {
+ AppendToOutput(", lsl #%" PRId32, 16 * instr->ShiftMoveWide());
+ }
+ } else {
+ VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+ uint64_t imm = static_cast<uint64_t>(instr->ImmMoveWide()) <<
+ (16 * instr->ShiftMoveWide());
+ if (format[5] == 'N')
+ imm = ~imm;
+ if (!instr->SixtyFourBits())
+ imm &= UINT64_C(0xffffffff);
+ AppendToOutput("#0x%" PRIx64, imm);
+ }
+ return 8;
+ }
+ case 'L': {
+ switch (format[2]) {
+ case 'L': { // ILLiteral - Immediate Load Literal.
+ AppendToOutput("pc%+" PRId32,
+ instr->ImmLLiteral() << kLiteralEntrySizeLog2);
+ return 9;
+ }
+ case 'S': { // ILS - Immediate Load/Store.
+ if (instr->ImmLS() != 0) {
+ AppendToOutput(", #%" PRId32, instr->ImmLS());
+ }
+ return 3;
+ }
+ case 'P': { // ILPx - Immediate Load/Store Pair, x = access size.
+ if (instr->ImmLSPair() != 0) {
+ // format[3] is the scale value. Convert to a number.
+ int scale = 1 << (format[3] - '0');
+ AppendToOutput(", #%" PRId32, instr->ImmLSPair() * scale);
+ }
+ return 4;
+ }
+ case 'U': { // ILU - Immediate Load/Store Unsigned.
+ if (instr->ImmLSUnsigned() != 0) {
+ int shift = instr->SizeLS();
+ AppendToOutput(", #%" PRId32, instr->ImmLSUnsigned() << shift);
+ }
+ return 3;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'C': { // ICondB - Immediate Conditional Branch.
+ int64_t offset = instr->ImmCondBranch() << 2;
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ return 6;
+ }
+ case 'A': { // IAddSub.
+ VIXL_ASSERT(instr->ShiftAddSub() <= 1);
+ int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
+ AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
+ return 7;
+ }
+ case 'F': { // IFPSingle, IFPDouble or IFPFBits.
+ if (format[3] == 'F') { // IFPFbits.
+ AppendToOutput("#%" PRId32, 64 - instr->FPScale());
+ return 8;
+ } else {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmFP(),
+ format[3] == 'S' ? instr->ImmFP32() : instr->ImmFP64());
+ return 9;
+ }
+ }
+ case 'T': { // ITri - Immediate Triangular Encoded.
+ AppendToOutput("#0x%" PRIx64, instr->ImmLogical());
+ return 4;
+ }
+ case 'N': { // INzcv.
+ int nzcv = (instr->Nzcv() << Flags_offset);
+ AppendToOutput("#%c%c%c%c", ((nzcv & NFlag) == 0) ? 'n' : 'N',
+ ((nzcv & ZFlag) == 0) ? 'z' : 'Z',
+ ((nzcv & CFlag) == 0) ? 'c' : 'C',
+ ((nzcv & VFlag) == 0) ? 'v' : 'V');
+ return 5;
+ }
+ case 'P': { // IP - Conditional compare.
+ AppendToOutput("#%" PRId32, instr->ImmCondCmp());
+ return 2;
+ }
+ case 'B': { // Bitfields.
+ return SubstituteBitfieldImmediateField(instr, format);
+ }
+ case 'E': { // IExtract.
+ AppendToOutput("#%" PRId32, instr->ImmS());
+ return 8;
+ }
+ case 'S': { // IS - Test and branch bit.
+ AppendToOutput("#%" PRId32, (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40());
+ return 2;
+ }
+ case 's': { // Is - Shift (immediate).
+ switch (format[2]) {
+ case '1': { // Is1 - SSHR.
+ int shift = 16 << HighestSetBitPosition(instr->ImmNEONImmh());
+ shift -= instr->ImmNEONImmhImmb();
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ case '2': { // Is2 - SLI.
+ int shift = instr->ImmNEONImmhImmb();
+ shift -= 8 << HighestSetBitPosition(instr->ImmNEONImmh());
+ AppendToOutput("#%d", shift);
+ return 3;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'D': { // IDebug - HLT and BRK instructions.
+ AppendToOutput("#0x%" PRIx32, instr->ImmException());
+ return 6;
+ }
+ case 'V': { // Immediate Vector.
+ switch (format[2]) {
+ case 'E': { // IVExtract.
+ AppendToOutput("#%" PRId32, instr->ImmNEONExt());
+ return 9;
+ }
+ case 'B': { // IVByElemIndex.
+ int vm_index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ vm_index = (vm_index << 1) | instr->NEONM();
+ }
+ AppendToOutput("%d", vm_index);
+ return strlen("IVByElemIndex");
+ }
+ case 'I': { // INS element.
+ if (strncmp(format, "IVInsIndex", strlen("IVInsIndex")) == 0) {
+ int rd_index, rn_index;
+ int imm5 = instr->ImmNEON5();
+ int imm4 = instr->ImmNEON4();
+ int tz = CountTrailingZeros(imm5, 32);
+ rd_index = imm5 >> (tz + 1);
+ rn_index = imm4 >> tz;
+ if (strncmp(format, "IVInsIndex1", strlen("IVInsIndex1")) == 0) {
+ AppendToOutput("%d", rd_index);
+ return strlen("IVInsIndex1");
+ } else if (strncmp(format, "IVInsIndex2",
+ strlen("IVInsIndex2")) == 0) {
+ AppendToOutput("%d", rn_index);
+ return strlen("IVInsIndex2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ VIXL_FALLTHROUGH();
+ }
+ case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
+ AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
+ return 9;
+ }
+ case 'M': { // Modified Immediate cases.
+ if (strncmp(format,
+ "IVMIImmFPSingle",
+ strlen("IVMIImmFPSingle")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP32());
+ return strlen("IVMIImmFPSingle");
+ } else if (strncmp(format,
+ "IVMIImmFPDouble",
+ strlen("IVMIImmFPDouble")) == 0) {
+ AppendToOutput("#0x%" PRIx32 " (%.4f)", instr->ImmNEONabcdefgh(),
+ instr->ImmNEONFP64());
+ return strlen("IVMIImmFPDouble");
+ } else if (strncmp(format, "IVMIImm8", strlen("IVMIImm8")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ AppendToOutput("#0x%" PRIx64, imm8);
+ return strlen("IVMIImm8");
+ } else if (strncmp(format, "IVMIImm", strlen("IVMIImm")) == 0) {
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+ uint64_t imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1ULL << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ AppendToOutput("#0x%" PRIx64, imm);
+ return strlen("IVMIImm");
+ } else if (strncmp(format, "IVMIShiftAmt1",
+ strlen("IVMIShiftAmt1")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 * ((cmode >> 1) & 3);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt1");
+ } else if (strncmp(format, "IVMIShiftAmt2",
+ strlen("IVMIShiftAmt2")) == 0) {
+ int cmode = instr->NEONCmode();
+ int shift_amount = 8 << (cmode & 1);
+ AppendToOutput("#%d", shift_amount);
+ return strlen("IVMIShiftAmt2");
+ } else {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+ }
+ case 'X': { // IX - CLREX instruction.
+ AppendToOutput("#0x%" PRIx32, instr->CRm());
+ return 2;
+ }
+ default: {
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteBitfieldImmediateField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((format[0] == 'I') && (format[1] == 'B'));
+ unsigned r = instr->ImmR();
+ unsigned s = instr->ImmS();
+
+ switch (format[2]) {
+ case 'r': { // IBr.
+ AppendToOutput("#%d", r);
+ return 3;
+ }
+ case 's': { // IBs+1 or IBs-r+1.
+ if (format[3] == '+') {
+ AppendToOutput("#%d", s + 1);
+ return 5;
+ } else {
+ VIXL_ASSERT(format[3] == '-');
+ AppendToOutput("#%d", s - r + 1);
+ return 7;
+ }
+ }
+ case 'Z': { // IBZ-r.
+ VIXL_ASSERT((format[3] == '-') && (format[4] == 'r'));
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize : kWRegSize;
+ AppendToOutput("#%d", reg_size - r);
+ return 5;
+ }
+ default: {
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+ }
+}
+
+
+int Disassembler::SubstituteLiteralField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
+ USE(format);
+
+ const void * address = instr->LiteralAddress<const void *>();
+ switch (instr->Mask(LoadLiteralMask)) {
+ case LDR_w_lit:
+ case LDR_x_lit:
+ case LDRSW_x_lit:
+ case LDR_s_lit:
+ case LDR_d_lit:
+ case LDR_q_lit:
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case PRFM_lit: {
+ // Use the prefetch hint to decide how to print the address.
+ switch (instr->PrefetchHint()) {
+ case 0x0: // PLD: prefetch for load.
+ case 0x2: // PST: prepare for store.
+ AppendCodeRelativeDataAddressToOutput(instr, address);
+ break;
+ case 0x1: // PLI: preload instructions.
+ AppendCodeRelativeCodeAddressToOutput(instr, address);
+ break;
+ case 0x3: // Unallocated hint.
+ AppendCodeRelativeAddressToOutput(instr, address);
+ break;
+ }
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ return 6;
+}
+
+
+int Disassembler::SubstituteShiftField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'N');
+ VIXL_ASSERT(instr->ShiftDP() <= 0x3);
+
+ switch (format[1]) {
+ case 'D': { // NDP.
+ VIXL_ASSERT(instr->ShiftDP() != ROR);
+ VIXL_FALLTHROUGH();
+ }
+ case 'L': { // NLo.
+ if (instr->ImmDPShift() != 0) {
+ const char* shift_type[] = {"lsl", "lsr", "asr", "ror"};
+ AppendToOutput(", %s #%" PRId32, shift_type[instr->ShiftDP()],
+ instr->ImmDPShift());
+ }
+ return 3;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
+
+
+int Disassembler::SubstituteConditionField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'C');
+ const char* condition_code[] = { "eq", "ne", "hs", "lo",
+ "mi", "pl", "vs", "vc",
+ "hi", "ls", "ge", "lt",
+ "gt", "le", "al", "nv" };
+ int cond;
+ switch (format[1]) {
+ case 'B': cond = instr->ConditionBranch(); break;
+ case 'I': {
+ cond = InvertCondition(static_cast<Condition>(instr->Condition()));
+ break;
+ }
+ default: cond = instr->Condition();
+ }
+ AppendToOutput("%s", condition_code[cond]);
+ return 4;
+}
+
+
+int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT((strcmp(format, "AddrPCRelByte") == 0) || // Used by `adr`.
+ (strcmp(format, "AddrPCRelPage") == 0)); // Used by `adrp`.
+
+ int64_t offset = instr->ImmPCRel();
+
+ // Compute the target address based on the effective address (after applying
+ // code_address_offset). This is required for correct behaviour of adrp.
+ const Instruction* base = instr + code_address_offset();
+ if (format[9] == 'P') {
+ offset *= kPageSize;
+ base = AlignDown(base, kPageSize);
+ }
+ // Strip code_address_offset before printing, so we can use the
+ // semantically-correct AppendCodeRelativeAddressToOutput.
+ const void* target =
+ reinterpret_cast<const void*>(base + offset - code_address_offset());
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeAddressToOutput(instr, target);
+ return 13;
+}
+
+
+int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "TImm", 4) == 0);
+
+ int64_t offset = 0;
+ switch (format[5]) {
+ // BImmUncn - unconditional branch immediate.
+ case 'n': offset = instr->ImmUncondBranch(); break;
+ // BImmCond - conditional branch immediate.
+ case 'o': offset = instr->ImmCondBranch(); break;
+ // BImmCmpa - compare and branch immediate.
+ case 'm': offset = instr->ImmCmpBranch(); break;
+ // BImmTest - test and branch immediate.
+ case 'e': offset = instr->ImmTestBranch(); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ offset <<= kInstructionSizeLog2;
+ const void* target_address = reinterpret_cast<const void*>(instr + offset);
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+
+ AppendPCRelativeOffsetToOutput(instr, offset);
+ AppendToOutput(" ");
+ AppendCodeRelativeCodeAddressToOutput(instr, target_address);
+
+ return 8;
+}
+
+
+int Disassembler::SubstituteExtendField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Ext", 3) == 0);
+ VIXL_ASSERT(instr->ExtendMode() <= 7);
+ USE(format);
+
+ const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
+ "sxtb", "sxth", "sxtw", "sxtx" };
+
+ // If rd or rn is SP, uxtw on 32-bit registers and uxtx on 64-bit
+ // registers becomes lsl.
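+ // For example, "add x0, sp, x1, uxtx #3" disassembles as "add x0, sp, x1, lsl #3".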
+ if (((instr->Rd() == kZeroRegCode) || (instr->Rn() == kZeroRegCode)) &&
+ (((instr->ExtendMode() == UXTW) && (instr->SixtyFourBits() == 0)) ||
+ (instr->ExtendMode() == UXTX))) {
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(", lsl #%" PRId32, instr->ImmExtendShift());
+ }
+ } else {
+ AppendToOutput(", %s", extend_mode[instr->ExtendMode()]);
+ if (instr->ImmExtendShift() > 0) {
+ AppendToOutput(" #%" PRId32, instr->ImmExtendShift());
+ }
+ }
+ return 3;
+}
+
+
+int Disassembler::SubstituteLSRegOffsetField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+ const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
+ "undefined", "undefined", "sxtw", "sxtx" };
+ USE(format);
+
+ unsigned shift = instr->ImmShiftLS();
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ char reg_type = ((ext == UXTW) || (ext == SXTW)) ? 'w' : 'x';
+
+ unsigned rm = instr->Rm();
+ if (rm == kZeroRegCode) {
+ AppendToOutput("%czr", reg_type);
+ } else {
+ AppendToOutput("%c%d", reg_type, rm);
+ }
+
+ // Extend mode UXTX is an alias for shift mode LSL here.
+ if (!((ext == UXTX) && (shift == 0))) {
+ AppendToOutput(", %s", extend_mode[ext]);
+ if (shift != 0) {
+ AppendToOutput(" #%d", instr->SizeLS());
+ }
+ }
+ return 9;
+}
+
+
+int Disassembler::SubstitutePrefetchField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'P');
+ USE(format);
+
+ static const char* hints[] = {"ld", "li", "st"};
+ static const char* stream_options[] = {"keep", "strm"};
+
+ unsigned hint = instr->PrefetchHint();
+ unsigned target = instr->PrefetchTarget() + 1;
+ unsigned stream = instr->PrefetchStream();
+
+ if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+ // Unallocated prefetch operations.
+ int prefetch_mode = instr->ImmPrefetchOperation();
+ AppendToOutput("#0b%c%c%c%c%c",
+ (prefetch_mode & (1 << 4)) ? '1' : '0',
+ (prefetch_mode & (1 << 3)) ? '1' : '0',
+ (prefetch_mode & (1 << 2)) ? '1' : '0',
+ (prefetch_mode & (1 << 1)) ? '1' : '0',
+ (prefetch_mode & (1 << 0)) ? '1' : '0');
+ } else {
+ VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+ AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+ }
+ return 6;
+}
+
+int Disassembler::SubstituteBarrierField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'M');
+ USE(format);
+
+ static const char* options[4][4] = {
+ { "sy (0b0000)", "oshld", "oshst", "osh" },
+ { "sy (0b0100)", "nshld", "nshst", "nsh" },
+ { "sy (0b1000)", "ishld", "ishst", "ish" },
+ { "sy (0b1100)", "ld", "st", "sy" }
+ };
+ int domain = instr->ImmBarrierDomain();
+ int type = instr->ImmBarrierType();
+
+ AppendToOutput("%s", options[domain][type]);
+ return 1;
+}
+
+int Disassembler::SubstituteSysOpField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'G');
+ int op = -1;
+ switch (format[1]) {
+ case '1': op = instr->SysOp1(); break;
+ case '2': op = instr->SysOp2(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("#%d", op);
+ return 2;
+}
+
+int Disassembler::SubstituteCrField(const Instruction* instr,
+ const char* format) {
+ VIXL_ASSERT(format[0] == 'K');
+ int cr = -1;
+ switch (format[1]) {
+ case 'n': cr = instr->CRn(); break;
+ case 'm': cr = instr->CRm(); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ AppendToOutput("C%d", cr);
+ return 2;
+}
+
+void Disassembler::ResetOutput() {
+ buffer_pos_ = 0;
+ buffer_[buffer_pos_] = 0;
+}
+
+
+void Disassembler::AppendToOutput(const char* format, ...) {
+ va_list args;
+ va_start(args, format);
+ buffer_pos_ += vsnprintf(&buffer_[buffer_pos_], buffer_size_ - buffer_pos_,
+ format, args);
+ va_end(args);
+}
+
+
+void PrintDisassembler::ProcessOutput(const Instruction* instr) {
+ fprintf(stream_, "0x%016" PRIx64 " %08" PRIx32 "\t\t%s\n",
+ reinterpret_cast<uint64_t>(instr),
+ instr->InstructionBits(),
+ GetOutput());
+}
+
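+// Disassemble a single instruction into a caller-provided buffer. The output
+// is always NUL-terminated, even if the disassembly had to be truncated.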
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr)
+{
+ vixl::Disassembler disasm(buffer, bufsize-1);
+ vixl::Decoder decoder;
+ decoder.AppendVisitor(&disasm);
+ decoder.Decode(instr);
+ buffer[bufsize-1] = 0; // Just to be safe
+}
+
+char* GdbDisassembleInstruction(const Instruction* instr)
+{
+ static char buffer[1024];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+ return buffer;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Disasm-vixl.h b/js/src/jit/arm64/vixl/Disasm-vixl.h
new file mode 100644
index 0000000000..e04730da83
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Disasm-vixl.h
@@ -0,0 +1,181 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_DISASM_A64_H
+#define VIXL_A64_DISASM_A64_H
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+
+class Disassembler: public DecoderVisitor {
+ public:
+ Disassembler();
+ Disassembler(char* text_buffer, int buffer_size);
+ virtual ~Disassembler();
+ char* GetOutput();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr);
+
+ // Default output functions. The functions below implement a default way of
+ // printing elements in the disassembly. A sub-class can override these to
+ // customize the disassembly output.
+
+ // Prints the name of a register.
+ // TODO: This currently doesn't allow renaming of V registers.
+ virtual void AppendRegisterNameToOutput(const Instruction* instr,
+ const CPURegister& reg);
+
+ // Prints a PC-relative offset. This is used for example when disassembling
+ // branches to immediate offsets.
+ virtual void AppendPCRelativeOffsetToOutput(const Instruction* instr,
+ int64_t offset);
+
+ // Prints an address, in the general case. It can be code or data. This is
+ // used for example to print the target address of an ADR instruction.
+ virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some code.
+ // This is used for example to print the target address of a branch to an
+ // immediate offset.
+ // A sub-class can, for example, override this method to look up the address and
+ // print an appropriate name.
+ virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Prints the address of some data.
+ // This is used for example to print the source address of a load literal
+ // instruction.
+ virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ // Same as the above, but for addresses that are not relative to the code
+ // buffer. They are currently not used by VIXL.
+ virtual void AppendAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendCodeAddressToOutput(const Instruction* instr,
+ const void* addr);
+ virtual void AppendDataAddressToOutput(const Instruction* instr,
+ const void* addr);
+
+ public:
+ // Get/Set the offset that should be added to code addresses when printing
+ // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
+ // helpers.
+ // Below is an example of how a branch immediate instruction in memory at
+ // address 0xb010200 would disassemble with different offsets.
+ // Base address | Disassembly
+ // 0x0 | 0xb010200: b #+0xcc (addr 0xb0102cc)
+ // 0x10000 | 0xb000200: b #+0xcc (addr 0xb0002cc)
+ // 0xb010200 | 0x0: b #+0xcc (addr 0xcc)
+ void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
+ int64_t CodeRelativeAddress(const void* instr);
+
+ private:
+ void Format(
+ const Instruction* instr, const char* mnemonic, const char* format);
+ void Substitute(const Instruction* instr, const char* string);
+ int SubstituteField(const Instruction* instr, const char* format);
+ int SubstituteRegisterField(const Instruction* instr, const char* format);
+ int SubstituteImmediateField(const Instruction* instr, const char* format);
+ int SubstituteLiteralField(const Instruction* instr, const char* format);
+ int SubstituteBitfieldImmediateField(
+ const Instruction* instr, const char* format);
+ int SubstituteShiftField(const Instruction* instr, const char* format);
+ int SubstituteExtendField(const Instruction* instr, const char* format);
+ int SubstituteConditionField(const Instruction* instr, const char* format);
+ int SubstitutePCRelAddressField(const Instruction* instr, const char* format);
+ int SubstituteBranchTargetField(const Instruction* instr, const char* format);
+ int SubstituteLSRegOffsetField(const Instruction* instr, const char* format);
+ int SubstitutePrefetchField(const Instruction* instr, const char* format);
+ int SubstituteBarrierField(const Instruction* instr, const char* format);
+ int SubstituteSysOpField(const Instruction* instr, const char* format);
+ int SubstituteCrField(const Instruction* instr, const char* format);
+ bool RdIsZROrSP(const Instruction* instr) const {
+ return (instr->Rd() == kZeroRegCode);
+ }
+
+ bool RnIsZROrSP(const Instruction* instr) const {
+ return (instr->Rn() == kZeroRegCode);
+ }
+
+ bool RmIsZROrSP(const Instruction* instr) const {
+ return (instr->Rm() == kZeroRegCode);
+ }
+
+ bool RaIsZROrSP(const Instruction* instr) const {
+ return (instr->Ra() == kZeroRegCode);
+ }
+
+ bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
+
+ int64_t code_address_offset() const { return code_address_offset_; }
+
+ protected:
+ void ResetOutput();
+ void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
+
+ void set_code_address_offset(int64_t code_address_offset) {
+ code_address_offset_ = code_address_offset;
+ }
+
+ char* buffer_;
+ uint32_t buffer_pos_;
+ uint32_t buffer_size_;
+ bool own_buffer_;
+
+ int64_t code_address_offset_;
+};
+
+
+class PrintDisassembler: public Disassembler {
+ public:
+ explicit PrintDisassembler(FILE* stream) : stream_(stream) { }
+
+ protected:
+ virtual void ProcessOutput(const Instruction* instr) override;
+
+ private:
+ FILE *stream_;
+};
+
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr);
+char* GdbDisassembleInstruction(const Instruction* instr);
+
+} // namespace vixl
+
+#endif // VIXL_A64_DISASM_A64_H
diff --git a/js/src/jit/arm64/vixl/Globals-vixl.h b/js/src/jit/arm64/vixl/Globals-vixl.h
new file mode 100644
index 0000000000..2c7d5703f1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Globals-vixl.h
@@ -0,0 +1,272 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_GLOBALS_H
+#define VIXL_GLOBALS_H
+
+// Get standard C99 macros for integer types.
+#ifndef __STDC_CONSTANT_MACROS
+#define __STDC_CONSTANT_MACROS
+#endif
+
+#ifndef __STDC_LIMIT_MACROS
+#define __STDC_LIMIT_MACROS
+#endif
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include "mozilla/Assertions.h"
+
+#include <cstdarg>
+#include <cstddef>
+#include <cstdio>
+#include <cstdlib>
+
+extern "C" {
+#include <inttypes.h>
+#include <stdint.h>
+}
+
+#include "jstypes.h"
+
+#include "jit/arm64/vixl/Platform-vixl.h"
+#include "js/Utility.h"
+
+#ifdef VIXL_NEGATIVE_TESTING
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#endif
+
+namespace vixl {
+
+typedef uint8_t byte;
+
+const int KBytes = 1024;
+const int MBytes = 1024 * KBytes;
+
+const int kBitsPerByte = 8;
+
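+// Map a bit width to the matching unsigned integer type; for example,
+// Unsigned<64>::type is uint64_t.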
+template <int SizeInBits>
+struct Unsigned;
+
+template <>
+struct Unsigned<32> {
+ typedef uint32_t type;
+};
+
+template <>
+struct Unsigned<64> {
+ typedef uint64_t type;
+};
+
+} // namespace vixl
+
+// Detect the host's pointer size.
+#if (UINTPTR_MAX == UINT32_MAX)
+#define VIXL_HOST_POINTER_32
+#elif (UINTPTR_MAX == UINT64_MAX)
+#define VIXL_HOST_POINTER_64
+#else
+#error "Unsupported host pointer size."
+#endif
+
+#ifdef VIXL_NEGATIVE_TESTING
+#define VIXL_ABORT() \
+ do { \
+ std::ostringstream oss; \
+ oss << "Aborting in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+ do { \
+ std::ostringstream oss; \
+ oss << (msg) << "in " << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } while (false)
+#define VIXL_CHECK(condition) \
+ do { \
+ if (!(condition)) { \
+ std::ostringstream oss; \
+ oss << "Assertion failed (" #condition ")\nin "; \
+ oss << __FILE__ << ", line " << __LINE__ << std::endl; \
+ throw std::runtime_error(oss.str()); \
+ } \
+ } while (false)
+#else
+#define VIXL_ABORT() \
+ do { \
+ MOZ_CRASH(); \
+ } while (false)
+#define VIXL_ABORT_WITH_MSG(msg) \
+ do { \
+ MOZ_CRASH(msg); \
+ } while (false)
+#define VIXL_CHECK(condition) \
+ do { \
+ if (!(condition)) { \
+ MOZ_CRASH(); \
+ } \
+ } while (false)
+#endif
+#ifdef DEBUG
+#define VIXL_ASSERT(condition) MOZ_ASSERT(condition)
+#define VIXL_UNIMPLEMENTED() \
+ do { \
+ VIXL_ABORT_WITH_MSG("UNIMPLEMENTED "); \
+ } while (false)
+#define VIXL_UNREACHABLE() \
+ do { \
+ VIXL_ABORT_WITH_MSG("UNREACHABLE "); \
+ } while (false)
+#else
+#define VIXL_ASSERT(condition) ((void)0)
+#define VIXL_UNIMPLEMENTED() ((void)0)
+#define VIXL_UNREACHABLE() MOZ_CRASH("vixl unreachable")
+#endif
+// This is not as powerful as template-based assertions, but it is simple.
+// It assumes that the descriptions are unique. If this starts being a problem,
+// we can switch to a different implementation.
+#define VIXL_CONCAT(a, b) a##b
+#if __cplusplus >= 201103L
+#define VIXL_STATIC_ASSERT_LINE(line_unused, condition, message) \
+ static_assert(condition, message)
+#else
+#define VIXL_STATIC_ASSERT_LINE(line, condition, message_unused) \
+ typedef char VIXL_CONCAT(STATIC_ASSERT_LINE_, line)[(condition) ? 1 : -1] \
+ __attribute__((unused))
+#endif
+#define VIXL_STATIC_ASSERT(condition) \
+ VIXL_STATIC_ASSERT_LINE(__LINE__, condition, "")
+#define VIXL_STATIC_ASSERT_MESSAGE(condition, message) \
+ VIXL_STATIC_ASSERT_LINE(__LINE__, condition, message)
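+// Example use: VIXL_STATIC_ASSERT(sizeof(uint32_t) == 4);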
+
+#define VIXL_WARNING(message) \
+ do { \
+ printf("WARNING in %s, line %i: %s", __FILE__, __LINE__, message); \
+ } while (false)
+
+template <typename T1>
+inline void USE(const T1&) {}
+
+template <typename T1, typename T2>
+inline void USE(const T1&, const T2&) {}
+
+template <typename T1, typename T2, typename T3>
+inline void USE(const T1&, const T2&, const T3&) {}
+
+template <typename T1, typename T2, typename T3, typename T4>
+inline void USE(const T1&, const T2&, const T3&, const T4&) {}
+
+#define VIXL_ALIGNMENT_EXCEPTION() \
+ do { \
+ VIXL_ABORT_WITH_MSG("ALIGNMENT EXCEPTION\t"); \
+ } while (0)
+
+// The clang::fallthrough attribute is used along with the Wimplicit-fallthrough
+// argument to annotate intentional fall-through between switch labels.
+// For more information please refer to:
+// http://clang.llvm.org/docs/AttributeReference.html#fallthrough-clang-fallthrough
+#ifndef __has_warning
+#define __has_warning(x) 0
+#endif
+
+// Fallthrough annotation for Clang and C++11(201103L).
+#if __has_warning("-Wimplicit-fallthrough") && __cplusplus >= 201103L
+#define VIXL_FALLTHROUGH() [[clang::fallthrough]]
+// Fallthrough annotation for GCC >= 7.
+#elif __GNUC__ >= 7
+#define VIXL_FALLTHROUGH() __attribute__((fallthrough))
+#else
+#define VIXL_FALLTHROUGH() \
+ do { \
+ } while (0)
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_NO_RETURN [[noreturn]]
+#else
+#define VIXL_NO_RETURN __attribute__((noreturn))
+#endif
+#ifdef VIXL_DEBUG
+#define VIXL_NO_RETURN_IN_DEBUG_MODE VIXL_NO_RETURN
+#else
+#define VIXL_NO_RETURN_IN_DEBUG_MODE
+#endif
+
+#if __cplusplus >= 201103L
+#define VIXL_OVERRIDE override
+#else
+#define VIXL_OVERRIDE
+#endif
+
+#ifdef VIXL_INCLUDE_SIMULATOR_AARCH64
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH64_GENERATE_SIMULATOR_CODE 0
+#endif
+#if VIXL_AARCH64_GENERATE_SIMULATOR_CODE
+#warning "Generating Simulator instructions without Simulator support."
+#endif
+#endif
+
+// We do not have a simulator for AArch32, although we can pretend we do so that
+// tests that require running natively can be skipped.
+#ifndef __arm__
+#define VIXL_INCLUDE_SIMULATOR_AARCH32
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 1
+#endif
+#else
+#ifndef VIXL_AARCH32_GENERATE_SIMULATOR_CODE
+#define VIXL_AARCH32_GENERATE_SIMULATOR_CODE 0
+#endif
+#endif
+
+// Target Architecture/ISA
+
+// Hack: always include AArch64.
+#define VIXL_INCLUDE_TARGET_A64
+
+#ifdef VIXL_INCLUDE_TARGET_A64
+#define VIXL_INCLUDE_TARGET_AARCH64
+#endif
+
+#if defined(VIXL_INCLUDE_TARGET_A32) && defined(VIXL_INCLUDE_TARGET_T32)
+#define VIXL_INCLUDE_TARGET_AARCH32
+#elif defined(VIXL_INCLUDE_TARGET_A32)
+#define VIXL_INCLUDE_TARGET_A32_ONLY
+#else
+#define VIXL_INCLUDE_TARGET_T32_ONLY
+#endif
+
+
+#endif // VIXL_GLOBALS_H
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.cpp b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
new file mode 100644
index 0000000000..887f12d0f2
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.cpp
@@ -0,0 +1,642 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+
+namespace vixl {
+
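+// Replicate the low `width` bits of `value` across a W or X register width.
+// For example, RepeatBitsAcrossReg(32, 0x1, 2) returns 0x55555555.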
+static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
+ uint64_t value,
+ unsigned width) {
+ VIXL_ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+ (width == 32));
+ VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
+ uint64_t result = value & ((UINT64_C(1) << width) - 1);
+ for (unsigned i = width; i < reg_size; i *= 2) {
+ result |= (result << i);
+ }
+ return result;
+}
+
+
+bool Instruction::IsLoad() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) != 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_w:
+ case LDRSB_x:
+ case LDRSH_w:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDR_b:
+ case LDR_h:
+ case LDR_s:
+ case LDR_d:
+ case LDR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+bool Instruction::IsStore() const {
+ if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+ return false;
+ }
+
+ if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+ return Mask(LoadStorePairLBit) == 0;
+ } else {
+ LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreMask));
+ switch (op) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ case STR_x:
+ case STR_b:
+ case STR_h:
+ case STR_s:
+ case STR_d:
+ case STR_q: return true;
+ default: return false;
+ }
+ }
+}
+
+
+// Logical immediates can't encode zero, so a return value of zero is used to
+// indicate a failure case. Specifically, where the constraints on imm_s are
+// not met.
+uint64_t Instruction::ImmLogical() const {
+ unsigned reg_size = SixtyFourBits() ? kXRegSize : kWRegSize;
+ int32_t n = BitN();
+ int32_t imm_s = ImmSetBits();
+ int32_t imm_r = ImmRotate();
+
+ // An integer is constructed from the n, imm_s and imm_r bits according to
+ // the following table:
+ //
+ // N imms immr size S R
+ // 1 ssssss rrrrrr 64 UInt(ssssss) UInt(rrrrrr)
+ // 0 0sssss xrrrrr 32 UInt(sssss) UInt(rrrrr)
+ // 0 10ssss xxrrrr 16 UInt(ssss) UInt(rrrr)
+ // 0 110sss xxxrrr 8 UInt(sss) UInt(rrr)
+ // 0 1110ss xxxxrr 4 UInt(ss) UInt(rr)
+ // 0 11110s xxxxxr 2 UInt(s) UInt(r)
+ // (s bits must not be all set)
+ //
+ // A pattern is constructed of size bits, where the least significant S+1
+ // bits are set. The pattern is rotated right by R, and repeated across a
+ // 32 or 64-bit value, depending on destination register width.
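+ // For example, N=0, imms=0b111100, immr=0b000000 selects a 2-bit element
+ // with only its low bit set (0b01); repeated across a 64-bit register this
+ // yields 0x5555555555555555.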
+ //
+
+ if (n == 1) {
+ if (imm_s == 0x3f) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << (imm_s + 1)) - 1;
+ return RotateRight(bits, imm_r, 64);
+ } else {
+ if ((imm_s >> 1) == 0x1f) {
+ return 0;
+ }
+ for (int width = 0x20; width >= 0x2; width >>= 1) {
+ if ((imm_s & width) == 0) {
+ int mask = width - 1;
+ if ((imm_s & mask) == mask) {
+ return 0;
+ }
+ uint64_t bits = (UINT64_C(1) << ((imm_s & mask) + 1)) - 1;
+ return RepeatBitsAcrossReg(reg_size,
+ RotateRight(bits, imm_r & mask, width),
+ width);
+ }
+ }
+ }
+ VIXL_UNREACHABLE();
+ return 0;
+}
+
+
+uint32_t Instruction::ImmNEONabcdefgh() const {
+ return ImmNEONabc() << 5 | ImmNEONdefgh();
+}
+
+
+float Instruction::Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
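+ // For example, imm8 = 0x70 (0b01110000) expands to 0x3f800000, i.e. 1.0f.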
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return RawbitsToFloat(result);
+}
+
+
+float Instruction::ImmFP32() const {
+ return Imm8ToFP32(ImmFP());
+}
+
+
+double Instruction::Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
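+ // For example, imm8 = 0x70 expands to 0x3ff0000000000000, i.e. 1.0.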
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+
+ return RawbitsToDouble(result);
+}
+
+
+double Instruction::ImmFP64() const {
+ return Imm8ToFP64(ImmFP());
+}
+
+
+float Instruction::ImmNEONFP32() const {
+ return Imm8ToFP32(ImmNEONabcdefgh());
+}
+
+
+double Instruction::ImmNEONFP64() const {
+ return Imm8ToFP64(ImmNEONabcdefgh());
+}
+
+
+unsigned CalcLSDataSize(LoadStoreOp op) {
+ VIXL_ASSERT((LSSize_offset + LSSize_width) == (kInstructionSize * 8));
+ unsigned size = static_cast<Instr>(op) >> LSSize_offset;
+ if ((op & LSVector_mask) != 0) {
+ // Vector register memory operations encode the access size in the "size"
+ // and "opc" fields.
+ if ((size == 0) && ((op & LSOpc_mask) >> LSOpc_offset) >= 2) {
+ size = kQRegSizeInBytesLog2;
+ }
+ }
+ return size;
+}
+
+
+unsigned CalcLSPairDataSize(LoadStorePairOp op) {
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ switch (op) {
+ case STP_q:
+ case LDP_q: return kQRegSizeInBytesLog2;
+ case STP_x:
+ case LDP_x:
+ case STP_d:
+ case LDP_d: return kXRegSizeInBytesLog2;
+ default: return kWRegSizeInBytesLog2;
+ }
+}
+
+
+int Instruction::ImmBranchRangeBitwidth(ImmBranchType branch_type) {
+ switch (branch_type) {
+ case UncondBranchType:
+ return ImmUncondBranch_width;
+ case CondBranchType:
+ return ImmCondBranch_width;
+ case CompareBranchType:
+ return ImmCmpBranch_width;
+ case TestBranchType:
+ return ImmTestBranch_width;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+
+int32_t Instruction::ImmBranchForwardRange(ImmBranchType branch_type) {
+ int32_t encoded_max = 1 << (ImmBranchRangeBitwidth(branch_type) - 1);
+ return encoded_max * kInstructionSize;
+}
+
+
+bool Instruction::IsValidImmPCOffset(ImmBranchType branch_type,
+ int64_t offset) {
+ return IsIntN(ImmBranchRangeBitwidth(branch_type), offset);
+}
+
+ImmBranchRangeType Instruction::ImmBranchTypeToRange(ImmBranchType branch_type)
+{
+ switch (branch_type) {
+ case UncondBranchType:
+ return UncondBranchRangeType;
+ case CondBranchType:
+ case CompareBranchType:
+ return CondBranchRangeType;
+ case TestBranchType:
+ return TestBranchRangeType;
+ default:
+ return UnknownBranchRangeType;
+ }
+}
+
+int32_t Instruction::ImmBranchMaxForwardOffset(ImmBranchRangeType range_type)
+{
+ // Branches encode a pc-relative two's complement number of 32-bit
+ // instructions. Compute the number of bytes corresponding to the largest
+ // positive number of instructions that can be encoded.
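+ // For example, conditional branches use a 19-bit immediate field, so the
+ // largest forward offset is ((1 << 19) - 1) / 2 * 4 = 1048572 bytes.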
+ switch(range_type) {
+ case TestBranchRangeType:
+ return ((1 << ImmTestBranch_width) - 1) / 2 * kInstructionSize;
+ case CondBranchRangeType:
+ return ((1 << ImmCondBranch_width) - 1) / 2 * kInstructionSize;
+ case UncondBranchRangeType:
+ return ((1 << ImmUncondBranch_width) - 1) / 2 * kInstructionSize;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+int32_t Instruction::ImmBranchMinBackwardOffset(ImmBranchRangeType range_type)
+{
+ switch(range_type) {
+ case TestBranchRangeType:
+ return -int32_t((1 << ImmTestBranch_width) / 2 * kInstructionSize);
+ case CondBranchRangeType:
+ return -int32_t((1 << ImmCondBranch_width) / 2 * kInstructionSize);
+ case UncondBranchRangeType:
+ return -int32_t((1 << ImmUncondBranch_width) / 2 * kInstructionSize);
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+}
+
+const Instruction* Instruction::ImmPCOffsetTarget() const {
+ const Instruction * base = this;
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ if (Mask(PCRelAddressingMask) == ADRP) {
+ base = AlignDown(base, kPageSize);
+ offset *= kPageSize;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADR);
+ }
+ } else {
+ // All PC-relative branches.
+ VIXL_ASSERT(BranchType() != UnknownBranchType);
+ // Relative branch offsets are instruction-size-aligned.
+ offset = ImmBranch() << kInstructionSizeLog2;
+ }
+ return base + offset;
+}
+
+
+int Instruction::ImmBranch() const {
+ switch (BranchType()) {
+ case CondBranchType: return ImmCondBranch();
+ case UncondBranchType: return ImmUncondBranch();
+ case CompareBranchType: return ImmCmpBranch();
+ case TestBranchType: return ImmTestBranch();
+ default: VIXL_UNREACHABLE();
+ }
+ return 0;
+}
+
+
+void Instruction::SetImmPCOffsetTarget(const Instruction* target) {
+ if (IsPCRelAddressing()) {
+ SetPCRelImmTarget(target);
+ } else {
+ SetBranchImmTarget(target);
+ }
+}
+
+
+void Instruction::SetPCRelImmTarget(const Instruction* target) {
+ ptrdiff_t imm21;
+ if ((Mask(PCRelAddressingMask) == ADR)) {
+ imm21 = target - this;
+ } else {
+ VIXL_ASSERT(Mask(PCRelAddressingMask) == ADRP);
+ uintptr_t this_page = reinterpret_cast<uintptr_t>(this) / kPageSize;
+ uintptr_t target_page = reinterpret_cast<uintptr_t>(target) / kPageSize;
+ imm21 = target_page - this_page;
+ }
+ Instr imm = Assembler::ImmPCRelAddress(static_cast<int32_t>(imm21));
+
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+}
+
+
+void Instruction::SetBranchImmTarget(const Instruction* target) {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ Instr branch_imm = 0;
+ uint32_t imm_mask = 0;
+ int offset = static_cast<int>((target - this) >> kInstructionSizeLog2);
+ switch (BranchType()) {
+ case CondBranchType: {
+ branch_imm = Assembler::ImmCondBranch(offset);
+ imm_mask = ImmCondBranch_mask;
+ break;
+ }
+ case UncondBranchType: {
+ branch_imm = Assembler::ImmUncondBranch(offset);
+ imm_mask = ImmUncondBranch_mask;
+ break;
+ }
+ case CompareBranchType: {
+ branch_imm = Assembler::ImmCmpBranch(offset);
+ imm_mask = ImmCmpBranch_mask;
+ break;
+ }
+ case TestBranchType: {
+ branch_imm = Assembler::ImmTestBranch(offset);
+ imm_mask = ImmTestBranch_mask;
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+ SetInstructionBits(Mask(~imm_mask) | branch_imm);
+}
+
+
+void Instruction::SetImmLLiteral(const Instruction* source) {
+ VIXL_ASSERT(IsWordAligned(source));
+ ptrdiff_t offset = (source - this) >> kLiteralEntrySizeLog2;
+ Instr imm = Assembler::ImmLLiteral(static_cast<int>(offset));
+ Instr mask = ImmLLiteral_mask;
+
+ SetInstructionBits(Mask(~mask) | imm);
+}
+
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8H || vform == kFormat4S || vform == kFormat2D ||
+ vform == kFormatH || vform == kFormatS || vform == kFormatD);
+ switch (vform) {
+ case kFormat8H: return kFormat8B;
+ case kFormat4S: return kFormat4H;
+ case kFormat2D: return kFormat2S;
+ case kFormatH: return kFormatB;
+ case kFormatS: return kFormatH;
+ case kFormatD: return kFormatS;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S ||
+ vform == kFormatB || vform == kFormatH || vform == kFormatS);
+ switch (vform) {
+ case kFormat8B: return kFormat8H;
+ case kFormat4H: return kFormat4S;
+ case kFormat2S: return kFormat2D;
+ case kFormatB: return kFormatH;
+ case kFormatH: return kFormatS;
+ case kFormatS: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatFillQ(const VectorFormat vform) {
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return kFormat16B;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return kFormat8H;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return kFormat4S;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return kFormat2D;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform) {
+ switch (vform) {
+ case kFormat4H: return kFormat8B;
+ case kFormat8H: return kFormat16B;
+ case kFormat2S: return kFormat4H;
+ case kFormat4S: return kFormat8H;
+ case kFormat1D: return kFormat2S;
+ case kFormat2D: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat8B || vform == kFormat4H || vform == kFormat2S);
+ switch (vform) {
+ case kFormat8B: return kFormat16B;
+ case kFormat4H: return kFormat8H;
+ case kFormat2S: return kFormat4S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform) {
+ VIXL_ASSERT(vform == kFormat16B || vform == kFormat8H || vform == kFormat4S);
+ switch (vform) {
+ case kFormat16B: return kFormat8B;
+ case kFormat8H: return kFormat4H;
+ case kFormat4S: return kFormat2S;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+VectorFormat ScalarFormatFromLaneSize(int laneSize) {
+ switch (laneSize) {
+ case 8: return kFormatB;
+ case 16: return kFormatH;
+ case 32: return kFormatS;
+ case 64: return kFormatD;
+ default: VIXL_UNREACHABLE(); return kFormatUndefined;
+ }
+}
+
+
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB: return kBRegSize;
+ case kFormatH: return kHRegSize;
+ case kFormatS: return kSRegSize;
+ case kFormatD: return kDRegSize;
+ case kFormat8B:
+ case kFormat4H:
+ case kFormat2S:
+ case kFormat1D: return kDRegSize;
+ default: return kQRegSize;
+ }
+}
+
+
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform) {
+ return RegisterSizeInBitsFromFormat(vform) / 8;
+}
+
+
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 8;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 16;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 32;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 64;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneSizeInBytesFromFormat(VectorFormat vform) {
+ return LaneSizeInBitsFromFormat(vform) / 8;
+}
+
+
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 0;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 1;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 2;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 3;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int LaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormat16B: return 16;
+ case kFormat8B:
+ case kFormat8H: return 8;
+ case kFormat4H:
+ case kFormat4S: return 4;
+ case kFormat2S:
+ case kFormat2D: return 2;
+ case kFormat1D:
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return 1;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+int MaxLaneCountFromFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormat8B:
+ case kFormat16B: return 16;
+ case kFormatH:
+ case kFormat4H:
+ case kFormat8H: return 8;
+ case kFormatS:
+ case kFormat2S:
+ case kFormat4S: return 4;
+ case kFormatD:
+ case kFormat1D:
+ case kFormat2D: return 2;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+}
+
+
+// Does 'vform' indicate a vector format or a scalar format?
+bool IsVectorFormat(VectorFormat vform) {
+ VIXL_ASSERT(vform != kFormatUndefined);
+ switch (vform) {
+ case kFormatB:
+ case kFormatH:
+ case kFormatS:
+ case kFormatD: return false;
+ default: return true;
+ }
+}
+
+
+int64_t MaxIntFromFormat(VectorFormat vform) {
+ return INT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+int64_t MinIntFromFormat(VectorFormat vform) {
+ return INT64_MIN >> (64 - LaneSizeInBitsFromFormat(vform));
+}
+
+
+uint64_t MaxUintFromFormat(VectorFormat vform) {
+ return UINT64_MAX >> (64 - LaneSizeInBitsFromFormat(vform));
+}
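+// Illustrative note, not part of the original VIXL sources: the three helpers
+// above shift the 64-bit extremes down to the lane width.  For a 16-bit lane
+// format such as kFormat8H:
+//
+//   MaxIntFromFormat(kFormat8H)  == INT64_MAX  >> 48 ==  32767
+//   MinIntFromFormat(kFormat8H)  == INT64_MIN  >> 48 == -32768
+//   MaxUintFromFormat(kFormat8H) == UINT64_MAX >> 48 ==  65535
+//
+// (MinIntFromFormat relies on the arithmetic right shift of a negative value
+// that all supported compilers implement.)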
+} // namespace vixl
+
diff --git a/js/src/jit/arm64/vixl/Instructions-vixl.h b/js/src/jit/arm64/vixl/Instructions-vixl.h
new file mode 100644
index 0000000000..80a75599f9
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instructions-vixl.h
@@ -0,0 +1,805 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUCTIONS_A64_H_
+#define VIXL_A64_INSTRUCTIONS_A64_H_
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+namespace vixl {
+// ISA constants. --------------------------------------------------------------
+
+typedef uint32_t Instr;
+const unsigned kInstructionSize = 4;
+const unsigned kInstructionSizeLog2 = 2;
+const unsigned kLiteralEntrySize = 4;
+const unsigned kLiteralEntrySizeLog2 = 2;
+const unsigned kMaxLoadLiteralRange = 1 * MBytes;
+
+// This is the nominal page size (as used by the adrp instruction); the actual
+// size of the memory pages allocated by the kernel is likely to differ.
+const unsigned kPageSize = 4 * KBytes;
+const unsigned kPageSizeLog2 = 12;
+
+const unsigned kBRegSize = 8;
+const unsigned kBRegSizeLog2 = 3;
+const unsigned kBRegSizeInBytes = kBRegSize / 8;
+const unsigned kBRegSizeInBytesLog2 = kBRegSizeLog2 - 3;
+const unsigned kHRegSize = 16;
+const unsigned kHRegSizeLog2 = 4;
+const unsigned kHRegSizeInBytes = kHRegSize / 8;
+const unsigned kHRegSizeInBytesLog2 = kHRegSizeLog2 - 3;
+const unsigned kWRegSize = 32;
+const unsigned kWRegSizeLog2 = 5;
+const unsigned kWRegSizeInBytes = kWRegSize / 8;
+const unsigned kWRegSizeInBytesLog2 = kWRegSizeLog2 - 3;
+const unsigned kXRegSize = 64;
+const unsigned kXRegSizeLog2 = 6;
+const unsigned kXRegSizeInBytes = kXRegSize / 8;
+const unsigned kXRegSizeInBytesLog2 = kXRegSizeLog2 - 3;
+const unsigned kSRegSize = 32;
+const unsigned kSRegSizeLog2 = 5;
+const unsigned kSRegSizeInBytes = kSRegSize / 8;
+const unsigned kSRegSizeInBytesLog2 = kSRegSizeLog2 - 3;
+const unsigned kDRegSize = 64;
+const unsigned kDRegSizeLog2 = 6;
+const unsigned kDRegSizeInBytes = kDRegSize / 8;
+const unsigned kDRegSizeInBytesLog2 = kDRegSizeLog2 - 3;
+const unsigned kQRegSize = 128;
+const unsigned kQRegSizeLog2 = 7;
+const unsigned kQRegSizeInBytes = kQRegSize / 8;
+const unsigned kQRegSizeInBytesLog2 = kQRegSizeLog2 - 3;
+const uint64_t kWRegMask = UINT64_C(0xffffffff);
+const uint64_t kXRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSRegMask = UINT64_C(0xffffffff);
+const uint64_t kDRegMask = UINT64_C(0xffffffffffffffff);
+const uint64_t kSSignMask = UINT64_C(0x80000000);
+const uint64_t kDSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kWSignMask = UINT64_C(0x80000000);
+const uint64_t kXSignMask = UINT64_C(0x8000000000000000);
+const uint64_t kByteMask = UINT64_C(0xff);
+const uint64_t kHalfWordMask = UINT64_C(0xffff);
+const uint64_t kWordMask = UINT64_C(0xffffffff);
+const uint64_t kXMaxUInt = UINT64_C(0xffffffffffffffff);
+const uint64_t kXMaxExactUInt = UINT64_C(0xfffffffffffff800);
+const uint64_t kWMaxUInt = UINT64_C(0xffffffff);
+const int64_t kXMaxInt = INT64_C(0x7fffffffffffffff);
+const int64_t kXMaxExactInt = INT64_C(0x7ffffffffffffc00);
+const int64_t kXMinInt = INT64_C(0x8000000000000000);
+const int32_t kWMaxInt = INT32_C(0x7fffffff);
+const int32_t kWMinInt = INT32_C(0x80000000);
+const unsigned kLinkRegCode = 30;
+const unsigned kZeroRegCode = 31;
+const unsigned kSPRegInternalCode = 63;
+const unsigned kRegCodeMask = 0x1f;
+
+const unsigned kAddressTagOffset = 56;
+const unsigned kAddressTagWidth = 8;
+const uint64_t kAddressTagMask =
+ ((UINT64_C(1) << kAddressTagWidth) - 1) << kAddressTagOffset;
+VIXL_STATIC_ASSERT(kAddressTagMask == UINT64_C(0xff00000000000000));
+
+unsigned CalcLSDataSize(LoadStoreOp op);
+unsigned CalcLSPairDataSize(LoadStorePairOp op);
+
+enum ImmBranchType {
+ UnknownBranchType = 0,
+ CondBranchType = 1,
+ UncondBranchType = 2,
+ CompareBranchType = 3,
+ TestBranchType = 4
+};
+
+// The classes of immediate branch ranges, in order of increasing range.
+// Note that CondBranchType and CompareBranchType have the same range.
+enum ImmBranchRangeType {
+ TestBranchRangeType, // tbz/tbnz: imm14 = +/- 32KB.
+ CondBranchRangeType, // b.cond/cbz/cbnz: imm19 = +/- 1MB.
+ UncondBranchRangeType, // b/bl: imm26 = +/- 128MB.
+ UnknownBranchRangeType,
+
+ // Number of 'short-range' branch range types.
+ // We don't consider unconditional branches 'short-range'.
+ NumShortBranchRangeTypes = UncondBranchRangeType
+};
+
+enum AddrMode {
+ Offset,
+ PreIndex,
+ PostIndex
+};
+
+enum Reg31Mode {
+ Reg31IsStackPointer,
+ Reg31IsZeroRegister
+};
+
+// Instructions. ---------------------------------------------------------------
+
+class Instruction {
+ public:
+ Instr InstructionBits() const {
+ return *(reinterpret_cast<const Instr*>(this));
+ }
+
+ void SetInstructionBits(Instr new_instr) {
+ *(reinterpret_cast<Instr*>(this)) = new_instr;
+ }
+
+ int Bit(int pos) const {
+ return (InstructionBits() >> pos) & 1;
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return ExtractUnsignedBitfield32(msb, lsb, InstructionBits());
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ int32_t bits = *(reinterpret_cast<const int32_t*>(this));
+ return ExtractSignedBitfield32(msb, lsb, bits);
+ }
+
+ Instr Mask(uint32_t mask) const {
+ return InstructionBits() & mask;
+ }
+
+ #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ int32_t Name() const { return Func(HighBit, LowBit); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
+ #undef DEFINE_GETTER
+
+ #define DEFINE_SETTER(Name, HighBit, LowBit, Func) \
+ inline void Set##Name(unsigned n) { SetBits32(HighBit, LowBit, n); }
+ INSTRUCTION_FIELDS_LIST(DEFINE_SETTER)
+ #undef DEFINE_SETTER
+
+ // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
+ // formed from ImmPCRelLo and ImmPCRelHi.
+ int ImmPCRel() const {
+ int offset =
+ static_cast<int>((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
+ int width = ImmPCRelLo_width + ImmPCRelHi_width;
+ return ExtractSignedBitfield32(width - 1, 0, offset);
+ }
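+  // Illustrative note, not part of the original VIXL sources: with the field
+  // widths from Constants-vixl.h (ImmPCRelLo_width == 2, ImmPCRelHi_width ==
+  // 19), this reassembles the split ADR/ADRP immediate into a signed 21-bit
+  // value; e.g. ImmPCRelHi() == 1 and ImmPCRelLo() == 2 give ImmPCRel() == 6,
+  // which ADR treats as a byte offset and ADRP as a page offset.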
+
+ uint64_t ImmLogical() const;
+ unsigned ImmNEONabcdefgh() const;
+ float ImmFP32() const;
+ double ImmFP64() const;
+ float ImmNEONFP32() const;
+ double ImmNEONFP64() const;
+
+ unsigned SizeLS() const {
+ return CalcLSDataSize(static_cast<LoadStoreOp>(Mask(LoadStoreMask)));
+ }
+
+ unsigned SizeLSPair() const {
+ return CalcLSPairDataSize(
+ static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
+ }
+
+ int NEONLSIndex(int access_size_shift) const {
+ int64_t q = NEONQ();
+ int64_t s = NEONS();
+ int64_t size = NEONLSSize();
+ int64_t index = (q << 3) | (s << 2) | size;
+ return static_cast<int>(index >> access_size_shift);
+ }
+
+ // Helpers.
+ bool IsCondBranchImm() const {
+ return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
+ }
+
+ bool IsUncondBranchImm() const {
+ return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
+ }
+
+ bool IsCompareBranch() const {
+ return Mask(CompareBranchFMask) == CompareBranchFixed;
+ }
+
+ bool IsTestBranch() const {
+ return Mask(TestBranchFMask) == TestBranchFixed;
+ }
+
+ bool IsImmBranch() const {
+ return BranchType() != UnknownBranchType;
+ }
+
+ bool IsPCRelAddressing() const {
+ return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
+ }
+
+ bool IsLogicalImmediate() const {
+ return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
+ }
+
+ bool IsAddSubImmediate() const {
+ return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
+ }
+
+ bool IsAddSubExtended() const {
+ return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
+ }
+
+ bool IsLoadOrStore() const {
+ return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
+ }
+
+ bool IsLoad() const;
+ bool IsStore() const;
+
+ bool IsLoadLiteral() const {
+ // This includes PRFM_lit.
+ return Mask(LoadLiteralFMask) == LoadLiteralFixed;
+ }
+
+ bool IsMovn() const {
+ return (Mask(MoveWideImmediateMask) == MOVN_x) ||
+ (Mask(MoveWideImmediateMask) == MOVN_w);
+ }
+
+ // Mozilla modifications.
+ bool IsUncondB() const;
+ bool IsCondB() const;
+ bool IsBL() const;
+ bool IsBR() const;
+ bool IsBLR() const;
+ bool IsTBZ() const;
+ bool IsTBNZ() const;
+ bool IsCBZ() const;
+ bool IsCBNZ() const;
+ bool IsLDR() const;
+ bool IsNOP() const;
+ bool IsCSDB() const;
+ bool IsADR() const;
+ bool IsADRP() const;
+ bool IsMovz() const;
+ bool IsMovk() const;
+ bool IsBranchLinkImm() const;
+ bool IsTargetReachable(const Instruction* target) const;
+ ptrdiff_t ImmPCRawOffset() const;
+ void SetImmPCRawOffset(ptrdiff_t offset);
+ void SetBits32(int msb, int lsb, unsigned value);
+
+ // Is this a stack pointer synchronization instruction as inserted by
+ // MacroAssembler::syncStackPtr()?
+ bool IsStackPtrSync() const;
+
+ static int ImmBranchRangeBitwidth(ImmBranchType branch_type);
+ static int32_t ImmBranchForwardRange(ImmBranchType branch_type);
+
+ // Check if offset can be encoded as a RAW offset in a branch_type
+ // instruction. The offset must be encodeable directly as the immediate field
+ // in the instruction, it is not scaled by kInstructionSize first.
+ static bool IsValidImmPCOffset(ImmBranchType branch_type, int64_t offset);
+
+ // Get the range type corresponding to a branch type.
+ static ImmBranchRangeType ImmBranchTypeToRange(ImmBranchType);
+
+ // Get the maximum realizable forward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the largest positive multiple of kInstructionSize, offset, such
+ // that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMaxForwardOffset(ImmBranchRangeType range_type);
+
+  // Get the minimum realizable backward PC offset (in bytes) for an immediate
+ // branch of the given range type.
+ // This is the smallest (i.e., largest in magnitude) negative multiple of
+ // kInstructionSize, offset, such that:
+ //
+ // IsValidImmPCOffset(xxx, offset / kInstructionSize)
+ //
+ // returns true for the same branch type.
+ static int32_t ImmBranchMinBackwardOffset(ImmBranchRangeType range_type);
+
+ // Indicate whether Rd can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rd field.
+ Reg31Mode RdMode() const {
+ // The following instructions use sp or wsp as Rd:
+ // Add/sub (immediate) when not setting the flags.
+ // Add/sub (extended) when not setting the flags.
+ // Logical (immediate) when not setting the flags.
+ // Otherwise, r31 is the zero register.
+ if (IsAddSubImmediate() || IsAddSubExtended()) {
+ if (Mask(AddSubSetFlagsBit)) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ if (IsLogicalImmediate()) {
+ // Of the logical (immediate) instructions, only ANDS (and its aliases)
+ // can set the flags. The others can all write into sp.
+ // Note that some logical operations are not available to
+ // immediate-operand instructions, so we have to combine two masks here.
+ if (Mask(LogicalImmediateMask & LogicalOpMask) == ANDS) {
+ return Reg31IsZeroRegister;
+ } else {
+ return Reg31IsStackPointer;
+ }
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ // Indicate whether Rn can be the stack pointer or the zero register. This
+ // does not check that the instruction actually has an Rn field.
+ Reg31Mode RnMode() const {
+ // The following instructions use sp or wsp as Rn:
+ // All loads and stores.
+ // Add/sub (immediate).
+ // Add/sub (extended).
+ // Otherwise, r31 is the zero register.
+ if (IsLoadOrStore() || IsAddSubImmediate() || IsAddSubExtended()) {
+ return Reg31IsStackPointer;
+ }
+ return Reg31IsZeroRegister;
+ }
+
+ ImmBranchType BranchType() const {
+ if (IsCondBranchImm()) {
+ return CondBranchType;
+ } else if (IsUncondBranchImm()) {
+ return UncondBranchType;
+ } else if (IsCompareBranch()) {
+ return CompareBranchType;
+ } else if (IsTestBranch()) {
+ return TestBranchType;
+ } else {
+ return UnknownBranchType;
+ }
+ }
+
+ // Find the target of this instruction. 'this' may be a branch or a
+ // PC-relative addressing instruction.
+ const Instruction* ImmPCOffsetTarget() const;
+
+ // Patch a PC-relative offset to refer to 'target'. 'this' may be a branch or
+ // a PC-relative addressing instruction.
+ void SetImmPCOffsetTarget(const Instruction* target);
+ // Patch a literal load instruction to load from 'source'.
+ void SetImmLLiteral(const Instruction* source);
+
+ // The range of a load literal instruction, expressed as 'instr +- range'.
+ // The range is actually the 'positive' range; the branch instruction can
+ // target [instr - range - kInstructionSize, instr + range].
+ static const int kLoadLiteralImmBitwidth = 19;
+ static const int kLoadLiteralRange =
+ (1 << kLoadLiteralImmBitwidth) / 2 - kInstructionSize;
+
+ // Calculate the address of a literal referred to by a load-literal
+ // instruction, and return it as the specified type.
+ //
+ // The literal itself is safely mutable only if the backing buffer is safely
+ // mutable.
+ template <typename T>
+ T LiteralAddress() const {
+ uint64_t base_raw = reinterpret_cast<uint64_t>(this);
+ int64_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
+ uint64_t address_raw = base_raw + offset;
+
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ T address = (T)(address_raw);
+
+ // Assert that the address can be represented by the specified type.
+ VIXL_ASSERT((uint64_t)(address) == address_raw);
+
+ return address;
+ }
+
+ uint32_t Literal32() const {
+ uint32_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ uint64_t Literal64() const {
+ uint64_t literal;
+ memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
+ return literal;
+ }
+
+ void SetLiteral64(uint64_t literal) const {
+ memcpy(LiteralAddress<void*>(), &literal, sizeof(literal));
+ }
+
+ float LiteralFP32() const {
+ return RawbitsToFloat(Literal32());
+ }
+
+ double LiteralFP64() const {
+ return RawbitsToDouble(Literal64());
+ }
+
+ const Instruction* NextInstruction() const {
+ return this + kInstructionSize;
+ }
+
+ // Skip any constant pools with artificial guards at this point.
+ // Return either |this| or the first instruction after the pool.
+ const Instruction* skipPool() const;
+
+ const Instruction* InstructionAtOffset(int64_t offset) const {
+ VIXL_ASSERT(IsWordAligned(this + offset));
+ return this + offset;
+ }
+
+ template<typename T> static Instruction* Cast(T src) {
+ return reinterpret_cast<Instruction*>(src);
+ }
+
+ template<typename T> static const Instruction* CastConst(T src) {
+ return reinterpret_cast<const Instruction*>(src);
+ }
+
+ private:
+ int ImmBranch() const;
+
+ static float Imm8ToFP32(uint32_t imm8);
+ static double Imm8ToFP64(uint32_t imm8);
+
+ void SetPCRelImmTarget(const Instruction* target);
+ void SetBranchImmTarget(const Instruction* target);
+};
+
+
+// Functions for handling NEON vector format information.
+enum VectorFormat {
+ kFormatUndefined = 0xffffffff,
+ kFormat8B = NEON_8B,
+ kFormat16B = NEON_16B,
+ kFormat4H = NEON_4H,
+ kFormat8H = NEON_8H,
+ kFormat2S = NEON_2S,
+ kFormat4S = NEON_4S,
+ kFormat1D = NEON_1D,
+ kFormat2D = NEON_2D,
+
+ // Scalar formats. We add the scalar bit to distinguish between scalar and
+ // vector enumerations; the bit is always set in the encoding of scalar ops
+ // and always clear for vector ops. Although kFormatD and kFormat1D appear
+ // to be the same, their meaning is subtly different. The first is a scalar
+ // operation, the second a vector operation that only affects one lane.
+ kFormatB = NEON_B | NEONScalar,
+ kFormatH = NEON_H | NEONScalar,
+ kFormatS = NEON_S | NEONScalar,
+ kFormatD = NEON_D | NEONScalar
+};
+
+VectorFormat VectorFormatHalfWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleWidth(const VectorFormat vform);
+VectorFormat VectorFormatDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatHalfLanes(const VectorFormat vform);
+VectorFormat ScalarFormatFromLaneSize(int lanesize);
+VectorFormat VectorFormatHalfWidthDoubleLanes(const VectorFormat vform);
+VectorFormat VectorFormatFillQ(const VectorFormat vform);
+unsigned RegisterSizeInBitsFromFormat(VectorFormat vform);
+unsigned RegisterSizeInBytesFromFormat(VectorFormat vform);
+// TODO: Make the return types of these functions consistent.
+unsigned LaneSizeInBitsFromFormat(VectorFormat vform);
+int LaneSizeInBytesFromFormat(VectorFormat vform);
+int LaneSizeInBytesLog2FromFormat(VectorFormat vform);
+int LaneCountFromFormat(VectorFormat vform);
+int MaxLaneCountFromFormat(VectorFormat vform);
+bool IsVectorFormat(VectorFormat vform);
+int64_t MaxIntFromFormat(VectorFormat vform);
+int64_t MinIntFromFormat(VectorFormat vform);
+uint64_t MaxUintFromFormat(VectorFormat vform);
+
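+// Illustrative sketch, not part of the original VIXL sources, showing how the
+// helpers above relate (all values follow from the definitions in
+// Instructions-vixl.cpp):
+//
+//   VectorFormatDoubleWidth(kFormat4H)          == kFormat4S
+//   VectorFormatHalfWidthDoubleLanes(kFormat4S) == kFormat8H
+//   LaneCountFromFormat(kFormat4S)              == 4
+//   LaneSizeInBitsFromFormat(kFormat4S)         == 32
+//   RegisterSizeInBitsFromFormat(kFormat4S)     == kQRegSize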
+
+enum NEONFormat {
+ NF_UNDEF = 0,
+ NF_8B = 1,
+ NF_16B = 2,
+ NF_4H = 3,
+ NF_8H = 4,
+ NF_2S = 5,
+ NF_4S = 6,
+ NF_1D = 7,
+ NF_2D = 8,
+ NF_B = 9,
+ NF_H = 10,
+ NF_S = 11,
+ NF_D = 12
+};
+
+static const unsigned kNEONFormatMaxBits = 6;
+
+struct NEONFormatMap {
+ // The bit positions in the instruction to consider.
+ uint8_t bits[kNEONFormatMaxBits];
+
+ // Mapping from concatenated bits to format.
+ NEONFormat map[1 << kNEONFormatMaxBits];
+};
+
+class NEONFormatDecoder {
+ public:
+ enum SubstitutionMode {
+ kPlaceholder,
+ kFormat
+ };
+
+ // Construct a format decoder with increasingly specific format maps for each
+  // substitution. If no format map is specified, the default is the integer
+ // format map.
+ explicit NEONFormatDecoder(const Instruction* instr) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(IntegerFormatMap());
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1);
+ }
+ NEONFormatDecoder(const Instruction* instr,
+ const NEONFormatMap* format0,
+ const NEONFormatMap* format1,
+ const NEONFormatMap* format2) {
+ instrbits_ = instr->InstructionBits();
+ SetFormatMaps(format0, format1, format2);
+ }
+
+ // Set the format mapping for all or individual substitutions.
+ void SetFormatMaps(const NEONFormatMap* format0,
+ const NEONFormatMap* format1 = NULL,
+ const NEONFormatMap* format2 = NULL) {
+ VIXL_ASSERT(format0 != NULL);
+ formats_[0] = format0;
+ formats_[1] = (format1 == NULL) ? formats_[0] : format1;
+ formats_[2] = (format2 == NULL) ? formats_[1] : format2;
+ }
+ void SetFormatMap(unsigned index, const NEONFormatMap* format) {
+    VIXL_ASSERT(index < (sizeof(formats_) / sizeof(formats_[0])));
+ VIXL_ASSERT(format != NULL);
+ formats_[index] = format;
+ }
+
+ // Substitute %s in the input string with the placeholder string for each
+  // register, i.e. "'B", "'H", etc.
+ const char* SubstitutePlaceholders(const char* string) {
+ return Substitute(string, kPlaceholder, kPlaceholder, kPlaceholder);
+ }
+
+ // Substitute %s in the input string with a new string based on the
+ // substitution mode.
+ const char* Substitute(const char* string,
+ SubstitutionMode mode0 = kFormat,
+ SubstitutionMode mode1 = kFormat,
+ SubstitutionMode mode2 = kFormat) {
+ snprintf(form_buffer_, sizeof(form_buffer_), string,
+ GetSubstitute(0, mode0),
+ GetSubstitute(1, mode1),
+ GetSubstitute(2, mode2));
+ return form_buffer_;
+ }
+
+ // Append a "2" to a mnemonic string based of the state of the Q bit.
+ const char* Mnemonic(const char* mnemonic) {
+ if ((instrbits_ & NEON_Q) != 0) {
+ snprintf(mne_buffer_, sizeof(mne_buffer_), "%s2", mnemonic);
+ return mne_buffer_;
+ }
+ return mnemonic;
+ }
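+  // Illustrative sketch, not part of the original VIXL sources: a
+  // disassembler-style caller might combine the two helpers above roughly as
+  // follows (the format string and 'instr' are hypothetical):
+  //
+  //   NEONFormatDecoder nfd(instr);  // Defaults to IntegerFormatMap().
+  //   const char* args = nfd.Substitute("'Vd.%s, 'Vn.%s, 'Vm.%s");
+  //
+  // For an instruction with Q == 1 and size == 0b10, each %s becomes "4s",
+  // and Mnemonic("saddl") would return "saddl2".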
+
+ VectorFormat GetVectorFormat(int format_index = 0) {
+ return GetVectorFormat(formats_[format_index]);
+ }
+
+ VectorFormat GetVectorFormat(const NEONFormatMap* format_map) {
+ static const VectorFormat vform[] = {
+ kFormatUndefined,
+ kFormat8B, kFormat16B, kFormat4H, kFormat8H,
+ kFormat2S, kFormat4S, kFormat1D, kFormat2D,
+ kFormatB, kFormatH, kFormatS, kFormatD
+ };
+ VIXL_ASSERT(GetNEONFormat(format_map) < (sizeof(vform) / sizeof(vform[0])));
+ return vform[GetNEONFormat(format_map)];
+ }
+
+ // Built in mappings for common cases.
+
+ // The integer format map uses three bits (Q, size<1:0>) to encode the
+ // "standard" set of NEON integer vector formats.
+ static const NEONFormatMap* IntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
+
+ // The long integer format map uses two bits (size<1:0>) to encode the
+ // long set of NEON integer vector formats. These are used in narrow, wide
+ // and long operations.
+ static const NEONFormatMap* LongIntegerFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_8H, NF_4S, NF_2D}
+ };
+ return &map;
+ }
+
+ // The FP format map uses two bits (Q, size<0>) to encode the NEON FP vector
+ // formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap* FPFormatMap() {
+ // The FP format map assumes two bits (Q, size<0>) are used to encode the
+ // NEON FP vector formats: NF_2S, NF_4S, NF_2D.
+ static const NEONFormatMap map = {
+ {22, 30}, {NF_2S, NF_4S, NF_UNDEF, NF_2D}
+ };
+ return &map;
+ }
+
+ // The load/store format map uses three bits (Q, 11, 10) to encode the
+ // set of NEON vector formats.
+ static const NEONFormatMap* LoadStoreFormatMap() {
+ static const NEONFormatMap map = {
+ {11, 10, 30},
+ {NF_8B, NF_16B, NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ return &map;
+ }
+
+ // The logical format map uses one bit (Q) to encode the NEON vector format:
+ // NF_8B, NF_16B.
+ static const NEONFormatMap* LogicalFormatMap() {
+ static const NEONFormatMap map = {
+ {30}, {NF_8B, NF_16B}
+ };
+ return &map;
+ }
+
+ // The triangular format map uses between two and five bits to encode the NEON
+ // vector format:
+ // xxx10->8B, xxx11->16B, xx100->4H, xx101->8H
+ // x1000->2S, x1001->4S, 10001->2D, all others undefined.
+ static const NEONFormatMap* TriangularFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S,
+ NF_4S, NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_UNDEF, NF_2D,
+ NF_8B, NF_16B, NF_4H, NF_8H, NF_8B, NF_16B, NF_2S, NF_4S, NF_8B, NF_16B,
+ NF_4H, NF_8H, NF_8B, NF_16B}
+ };
+ return &map;
+ }
+
+ // The scalar format map uses two bits (size<1:0>) to encode the NEON scalar
+ // formats: NF_B, NF_H, NF_S, NF_D.
+ static const NEONFormatMap* ScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_B, NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The long scalar format map uses two bits (size<1:0>) to encode the longer
+ // NEON scalar formats: NF_H, NF_S, NF_D.
+ static const NEONFormatMap* LongScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {23, 22}, {NF_H, NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The FP scalar format map assumes one bit (size<0>) is used to encode the
+ // NEON FP scalar formats: NF_S, NF_D.
+ static const NEONFormatMap* FPScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {22}, {NF_S, NF_D}
+ };
+ return &map;
+ }
+
+ // The triangular scalar format map uses between one and four bits to encode
+ // the NEON FP scalar formats:
+ // xxx1->B, xx10->H, x100->S, 1000->D, all others undefined.
+ static const NEONFormatMap* TriangularScalarFormatMap() {
+ static const NEONFormatMap map = {
+ {19, 18, 17, 16},
+ {NF_UNDEF, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B,
+ NF_D, NF_B, NF_H, NF_B, NF_S, NF_B, NF_H, NF_B}
+ };
+ return &map;
+ }
+
+ private:
+ // Get a pointer to a string that represents the format or placeholder for
+ // the specified substitution index, based on the format map and instruction.
+ const char* GetSubstitute(int index, SubstitutionMode mode) {
+ if (mode == kFormat) {
+ return NEONFormatAsString(GetNEONFormat(formats_[index]));
+ }
+ VIXL_ASSERT(mode == kPlaceholder);
+ return NEONFormatAsPlaceholder(GetNEONFormat(formats_[index]));
+ }
+
+ // Get the NEONFormat enumerated value for bits obtained from the
+ // instruction based on the specified format mapping.
+ NEONFormat GetNEONFormat(const NEONFormatMap* format_map) {
+ return format_map->map[PickBits(format_map->bits)];
+ }
+
+ // Convert a NEONFormat into a string.
+ static const char* NEONFormatAsString(NEONFormat format) {
+ static const char* formats[] = {
+ "undefined",
+ "8b", "16b", "4h", "8h", "2s", "4s", "1d", "2d",
+ "b", "h", "s", "d"
+ };
+ VIXL_ASSERT(format < (sizeof(formats) / sizeof(formats[0])));
+ return formats[format];
+ }
+
+ // Convert a NEONFormat into a register placeholder string.
+ static const char* NEONFormatAsPlaceholder(NEONFormat format) {
+ VIXL_ASSERT((format == NF_B) || (format == NF_H) ||
+ (format == NF_S) || (format == NF_D) ||
+ (format == NF_UNDEF));
+ static const char* formats[] = {
+ "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "undefined", "undefined", "undefined", "undefined",
+ "'B", "'H", "'S", "'D"
+ };
+ return formats[format];
+ }
+
+ // Select bits from instrbits_ defined by the bits array, concatenate them,
+ // and return the value.
+ uint8_t PickBits(const uint8_t bits[]) {
+ uint8_t result = 0;
+ for (unsigned b = 0; b < kNEONFormatMaxBits; b++) {
+ if (bits[b] == 0) break;
+ result <<= 1;
+ result |= ((instrbits_ & (1 << bits[b])) == 0) ? 0 : 1;
+ }
+ return result;
+ }
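+  // Illustrative note, not part of the original VIXL sources: for
+  // IntegerFormatMap(), bits is {23, 22, 30}, so an instruction with
+  // size == 0b10 (bit 23 set, bit 22 clear) and Q == 1 (bit 30 set) gives
+  // PickBits() == 0b101 == 5, and map[5] == NF_4S.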
+
+ Instr instrbits_;
+ const NEONFormatMap* formats_[3];
+ char form_buffer_[64];
+ char mne_buffer_[16];
+};
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUCTIONS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.cpp b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
new file mode 100644
index 0000000000..5b245b6cf9
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.cpp
@@ -0,0 +1,844 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Instrument-vixl.h"
+
+namespace vixl {
+
+Counter::Counter(const char* name, CounterType type)
+ : count_(0), enabled_(false), type_(type) {
+ VIXL_ASSERT(name != NULL);
+  strncpy(name_, name, kCounterNameMaxLength - 1);
+  name_[kCounterNameMaxLength - 1] = '\0';
+}
+
+
+void Counter::Enable() {
+ enabled_ = true;
+}
+
+
+void Counter::Disable() {
+ enabled_ = false;
+}
+
+
+bool Counter::IsEnabled() {
+ return enabled_;
+}
+
+
+void Counter::Increment() {
+ if (enabled_) {
+ count_++;
+ }
+}
+
+
+uint64_t Counter::count() {
+ uint64_t result = count_;
+ if (type_ == Gauge) {
+ // If the counter is a Gauge, reset the count after reading.
+ count_ = 0;
+ }
+ return result;
+}
+
+
+const char* Counter::name() {
+ return name_;
+}
+
+
+CounterType Counter::type() {
+ return type_;
+}
+
+
+struct CounterDescriptor {
+ const char* name;
+ CounterType type;
+};
+
+
+static const CounterDescriptor kCounterList[] = {
+ {"Instruction", Cumulative},
+
+ {"Move Immediate", Gauge},
+ {"Add/Sub DP", Gauge},
+ {"Logical DP", Gauge},
+ {"Other Int DP", Gauge},
+ {"FP DP", Gauge},
+
+ {"Conditional Select", Gauge},
+ {"Conditional Compare", Gauge},
+
+ {"Unconditional Branch", Gauge},
+ {"Compare and Branch", Gauge},
+ {"Test and Branch", Gauge},
+ {"Conditional Branch", Gauge},
+
+ {"Load Integer", Gauge},
+ {"Load FP", Gauge},
+ {"Load Pair", Gauge},
+ {"Load Literal", Gauge},
+
+ {"Store Integer", Gauge},
+ {"Store FP", Gauge},
+ {"Store Pair", Gauge},
+
+ {"PC Addressing", Gauge},
+ {"Other", Gauge},
+ {"NEON", Gauge},
+ {"Crypto", Gauge}
+};
+
+
+Instrument::Instrument(const char* datafile, uint64_t sample_period)
+ : output_stream_(stdout), sample_period_(sample_period) {
+
+ // Set up the output stream. If datafile is non-NULL, use that file. If it
+ // can't be opened, or datafile is NULL, use stdout.
+ if (datafile != NULL) {
+ output_stream_ = fopen(datafile, "w");
+ if (output_stream_ == NULL) {
+ printf("Can't open output file %s. Using stdout.\n", datafile);
+ output_stream_ = stdout;
+ }
+ }
+
+ static const int num_counters =
+ sizeof(kCounterList) / sizeof(CounterDescriptor);
+
+ // Dump an instrumentation description comment at the top of the file.
+ fprintf(output_stream_, "# counters=%d\n", num_counters);
+ fprintf(output_stream_, "# sample_period=%" PRIu64 "\n", sample_period_);
+
+ // Construct Counter objects from counter description array.
+ for (int i = 0; i < num_counters; i++) {
+ if (Counter* counter = js_new<Counter>(kCounterList[i].name, kCounterList[i].type))
+ (void)counters_.append(counter);
+ }
+
+ DumpCounterNames();
+}
+
+
+Instrument::~Instrument() {
+ // Dump any remaining instruction data to the output file.
+ DumpCounters();
+
+ // Free all the counter objects.
+ for (auto counter : counters_) {
+ js_delete(counter);
+ }
+
+ if (output_stream_ != stdout) {
+ fclose(output_stream_);
+ }
+}
+
+
+void Instrument::Update() {
+ // Increment the instruction counter, and dump all counters if a sample period
+ // has elapsed.
+ static Counter* counter = GetCounter("Instruction");
+ VIXL_ASSERT(counter->type() == Cumulative);
+ counter->Increment();
+
+ if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
+ DumpCounters();
+ }
+}
+
+
+void Instrument::DumpCounters() {
+ // Iterate through the counter objects, dumping their values to the output
+ // stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%" PRIu64 ",", counter->count());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
+
+
+void Instrument::DumpCounterNames() {
+ // Iterate through the counter objects, dumping the counter names to the
+ // output stream.
+ for (auto counter : counters_) {
+ fprintf(output_stream_, "%s,", counter->name());
+ }
+ fprintf(output_stream_, "\n");
+ fflush(output_stream_);
+}
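+// Illustrative note, not part of the original VIXL sources: the output is a
+// simple comma-terminated CSV preceded by the comment lines written in the
+// constructor.  With the default counter list and sampling period it begins
+// roughly as follows (the count values are made up):
+//
+//   # counters=23
+//   # sample_period=4194304
+//   Instruction,Move Immediate,Add/Sub DP, ... ,NEON,Crypto,
+//   4194304,137,52, ... ,9,0,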
+
+
+void Instrument::HandleInstrumentationEvent(unsigned event) {
+ switch (event) {
+ case InstrumentStateEnable: Enable(); break;
+ case InstrumentStateDisable: Disable(); break;
+ default: DumpEventMarker(event);
+ }
+}
+
+
+void Instrument::DumpEventMarker(unsigned marker) {
+  // Dump an event marker to the output stream as a specially formatted comment
+ // line.
+ static Counter* counter = GetCounter("Instruction");
+
+ fprintf(output_stream_, "# %c%c @ %" PRId64 "\n", marker & 0xff,
+ (marker >> 8) & 0xff, counter->count());
+}
+
+
+Counter* Instrument::GetCounter(const char* name) {
+ // Get a Counter object by name from the counter list.
+ for (auto counter : counters_) {
+ if (strcmp(counter->name(), name) == 0) {
+ return counter;
+ }
+ }
+
+ // A Counter by that name does not exist: print an error message to stderr
+ // and the output file, and exit.
+ static const char* error_message =
+ "# Error: Unknown counter \"%s\". Exiting.\n";
+ fprintf(stderr, error_message, name);
+ fprintf(output_stream_, error_message, name);
+ exit(1);
+}
+
+
+void Instrument::Enable() {
+ for (auto counter : counters_) {
+ counter->Enable();
+ }
+}
+
+
+void Instrument::Disable() {
+ for (auto counter : counters_) {
+ counter->Disable();
+ }
+}
+
+
+void Instrument::VisitPCRelAddressing(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("PC Addressing");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitLogicalImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitMoveWideImmediate(const Instruction* instr) {
+ Update();
+ static Counter* counter = GetCounter("Move Immediate");
+
+ if (instr->IsMovn() && (instr->Rd() == kZeroRegCode)) {
+ unsigned imm = instr->ImmMoveWide();
+ HandleInstrumentationEvent(imm);
+ } else {
+ counter->Increment();
+ }
+}
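+// Illustrative note, not part of the original VIXL sources: the special case
+// above turns "movn" to the zero register into an out-of-band instrumentation
+// event instead of a "Move Immediate" count.  Per HandleInstrumentationEvent()
+// and InstrumentState, an immediate of 0 disables the counters, 1 enables
+// them, and any other value is dumped as a two-character marker; e.g.
+// #0x6261 would appear in the output as "# ab @ <instruction count>".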
+
+
+void Instrument::VisitBitfield(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Unconditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCompareBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Compare and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitTestBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Test and Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalBranch(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Branch");
+ counter->Increment();
+}
+
+
+void Instrument::VisitSystem(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitException(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStorePair(const Instruction* instr) {
+ static Counter* load_pair_counter = GetCounter("Load Pair");
+ static Counter* store_pair_counter = GetCounter("Store Pair");
+
+ if (instr->Mask(LoadStorePairLBit) != 0) {
+ load_pair_counter->Increment();
+ } else {
+ store_pair_counter->Increment();
+ }
+}
+
+
+void Instrument::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStorePairNonTemporal(const Instruction* instr) {
+ Update();
+ InstrumentLoadStorePair(instr);
+}
+
+
+void Instrument::VisitLoadStoreExclusive(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitLoadLiteral(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Load Literal");
+ counter->Increment();
+}
+
+
+void Instrument::InstrumentLoadStore(const Instruction* instr) {
+ static Counter* load_int_counter = GetCounter("Load Integer");
+ static Counter* store_int_counter = GetCounter("Store Integer");
+ static Counter* load_fp_counter = GetCounter("Load FP");
+ static Counter* store_fp_counter = GetCounter("Store FP");
+
+ switch (instr->Mask(LoadStoreMask)) {
+ case STRB_w:
+ case STRH_w:
+ case STR_w:
+ VIXL_FALLTHROUGH();
+ case STR_x: store_int_counter->Increment(); break;
+ case STR_s:
+ VIXL_FALLTHROUGH();
+ case STR_d: store_fp_counter->Increment(); break;
+ case LDRB_w:
+ case LDRH_w:
+ case LDR_w:
+ case LDR_x:
+ case LDRSB_x:
+ case LDRSH_x:
+ case LDRSW_x:
+ case LDRSB_w:
+ VIXL_FALLTHROUGH();
+ case LDRSH_w: load_int_counter->Increment(); break;
+ case LDR_s:
+ VIXL_FALLTHROUGH();
+ case LDR_d: load_fp_counter->Increment(); break;
+ }
+}
+
+
+void Instrument::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePostIndex(const Instruction* instr) {
+ USE(instr);
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStorePreIndex(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ Update();
+ InstrumentLoadStore(instr);
+}
+
+
+void Instrument::VisitLogicalShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Logical DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubShifted(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubExtended(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitAddSubWithCarry(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Add/Sub DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareRegister(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalCompareImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other Int DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalCompare(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Compare");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPConditionalSelect(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Conditional Select");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing1Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing2Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPDataProcessing3Source(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPIntegerConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitFPFixedPointConvert(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("FP DP");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto2RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCrypto3RegSHA(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitCryptoAES(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Crypto");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEON3Different(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONAcrossLanes(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONExtract(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONModifiedImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Diff(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalar3Same(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarCopy(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarPairwise(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONShiftImmediate(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONTable(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitNEONPerm(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("NEON");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnallocated(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+void Instrument::VisitUnimplemented(const Instruction* instr) {
+ USE(instr);
+ Update();
+ static Counter* counter = GetCounter("Other");
+ counter->Increment();
+}
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Instrument-vixl.h b/js/src/jit/arm64/vixl/Instrument-vixl.h
new file mode 100644
index 0000000000..eca076d234
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Instrument-vixl.h
@@ -0,0 +1,109 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_INSTRUMENT_A64_H_
+#define VIXL_A64_INSTRUMENT_A64_H_
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Constants-vixl.h"
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "js/AllocPolicy.h"
+
+namespace vixl {
+
+const int kCounterNameMaxLength = 256;
+const uint64_t kDefaultInstrumentationSamplingPeriod = 1 << 22;
+
+
+enum InstrumentState {
+ InstrumentStateDisable = 0,
+ InstrumentStateEnable = 1
+};
+
+
+enum CounterType {
+ Gauge = 0, // Gauge counters reset themselves after reading.
+ Cumulative = 1 // Cumulative counters keep their value after reading.
+};
+
+
+class Counter {
+ public:
+ explicit Counter(const char* name, CounterType type = Gauge);
+
+ void Increment();
+ void Enable();
+ void Disable();
+ bool IsEnabled();
+ uint64_t count();
+ const char* name();
+ CounterType type();
+
+ private:
+ char name_[kCounterNameMaxLength];
+ uint64_t count_;
+ bool enabled_;
+ CounterType type_;
+};
+
+
+class Instrument: public DecoderVisitor {
+ public:
+ explicit Instrument(const char* datafile = NULL,
+ uint64_t sample_period = kDefaultInstrumentationSamplingPeriod);
+ ~Instrument();
+
+ void Enable();
+ void Disable();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST(DECLARE)
+ #undef DECLARE
+
+ private:
+ void Update();
+ void DumpCounters();
+ void DumpCounterNames();
+ void DumpEventMarker(unsigned marker);
+ void HandleInstrumentationEvent(unsigned event);
+ Counter* GetCounter(const char* name);
+
+ void InstrumentLoadStore(const Instruction* instr);
+ void InstrumentLoadStorePair(const Instruction* instr);
+
+ mozilla::Vector<Counter*, 8, js::SystemAllocPolicy> counters_;
+
+ FILE *output_stream_;
+ uint64_t sample_period_;
+};
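+
+// Usage sketch (illustrative only, assuming the visitor API declared in
+// Decoder-vixl.h): an Instrument is attached to a vixl::Decoder, e.g.
+//
+//   Decoder decoder;
+//   Instrument instrument("vixl_stats.csv", 1 << 22);  // hypothetical file name
+//   decoder.AppendVisitor(&instrument);
+//
+// after which every decoded instruction increments the counter for its
+// instruction class, and the counters are written to the data file once per
+// sample period.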
+
+} // namespace vixl
+
+#endif // VIXL_A64_INSTRUMENT_A64_H_
diff --git a/js/src/jit/arm64/vixl/Logic-vixl.cpp b/js/src/jit/arm64/vixl/Logic-vixl.cpp
new file mode 100644
index 0000000000..71821a333f
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Logic-vixl.cpp
@@ -0,0 +1,4738 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include <cmath>
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+namespace vixl {
+
+template<> double Simulator::FPDefaultNaN<double>() {
+ return kFP64DefaultNaN;
+}
+
+
+template<> float Simulator::FPDefaultNaN<float>() {
+ return kFP32DefaultNaN;
+}
+
+
+double Simulator::FixedToDouble(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToDouble(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToDouble(-src, fbits, round);
+ }
+}
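+// Note (explanatory): the INT64_MIN case works because -src wraps back to the
+// same bit pattern, which the uint64_t parameter of UFixedToDouble then reads
+// as 2^63, exactly the magnitude required before the final negation.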
+
+
+double Simulator::UFixedToDouble(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int64_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToDouble(0, exponent, src, round);
+}
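+// Worked example (illustrative): UFixedToDouble(6, 1, round) treats 6 (0b110)
+// as having one fractional bit, i.e. the value 3.0. The highest set bit is
+// bit 2, so exponent = 2 - 1 = 1, and FPRoundToDouble packs a mantissa whose
+// leading bit carries the weight 2^1, giving 1.5 * 2^1 = 3.0.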
+
+
+float Simulator::FixedToFloat(int64_t src, int fbits, FPRounding round) {
+ if (src >= 0) {
+ return UFixedToFloat(src, fbits, round);
+ } else {
+ // This works for all negative values, including INT64_MIN.
+ return -UFixedToFloat(-src, fbits, round);
+ }
+}
+
+
+float Simulator::UFixedToFloat(uint64_t src, int fbits, FPRounding round) {
+ // An input of 0 is a special case because the result is effectively
+ // subnormal: The exponent is encoded as 0 and there is no implicit 1 bit.
+ if (src == 0) {
+ return 0.0f;
+ }
+
+ // Calculate the exponent. The highest significant bit will have the value
+ // 2^exponent.
+ const int highest_significant_bit = 63 - CountLeadingZeros(src);
+ const int32_t exponent = highest_significant_bit - fbits;
+
+ return FPRoundToFloat(0, exponent, src, round);
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16))
+ return;
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ dst.ReadUintFromMem(vform, index, addr);
+}
+
+
+void Simulator::ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.ReadUintFromMem(vform, i, addr);
+ }
+}
+
+
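+// Note (explanatory): ld2, ld3 and ld4 below model the de-interleaving ARM
+// structure loads. For ld2 the memory holds {a0, b0, a1, b1, ...}; the
+// simulator writes the a-elements to dst1 and the b-elements to dst2, with
+// each address stepping by 2 * esize per lane. ld3 and ld4 follow the same
+// pattern with three and four registers, and the ld*r variants instead
+// replicate a single structure into every lane.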
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ addr1 += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+}
+
+
+void Simulator::ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ addr1 += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+}
+
+
+void Simulator::ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, 16*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr1 + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr1);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ addr1 += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr1) {
+ if (handle_wasm_seg_fault(addr1, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr1 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ dst1.ReadUintFromMem(vform, index, addr1);
+ dst2.ReadUintFromMem(vform, index, addr2);
+ dst3.ReadUintFromMem(vform, index, addr3);
+ dst4.ReadUintFromMem(vform, index, addr4);
+}
+
+
+void Simulator::ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ dst1.ClearForWrite(vform);
+ dst2.ClearForWrite(vform);
+ dst3.ClearForWrite(vform);
+ dst4.ClearForWrite(vform);
+ uint64_t addr2 = addr + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr3 = addr2 + LaneSizeInBytesFromFormat(vform);
+ uint64_t addr4 = addr3 + LaneSizeInBytesFromFormat(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst1.ReadUintFromMem(vform, i, addr);
+ dst2.ReadUintFromMem(vform, i, addr2);
+ dst3.ReadUintFromMem(vform, i, addr3);
+ dst4.ReadUintFromMem(vform, i, addr4);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16))
+ return;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ src.WriteUintToMem(vform, i, addr);
+ addr += LaneSizeInBytesFromFormat(vform);
+ }
+}
+
+
+void Simulator::st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)))
+ return;
+ src.WriteUintToMem(vform, index, addr);
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*2))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ addr += 2 * esize;
+ addr2 += 2 * esize;
+ }
+}
+
+
+void Simulator::st2(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*2))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*3))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ addr += 3 * esize;
+ addr2 += 3 * esize;
+ addr3 += 3 * esize;
+ }
+}
+
+
+void Simulator::st3(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*3))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, 16*4))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ uint64_t addr2 = addr + esize;
+ uint64_t addr3 = addr2 + esize;
+ uint64_t addr4 = addr3 + esize;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.WriteUintToMem(vform, i, addr);
+ dst2.WriteUintToMem(vform, i, addr2);
+ dst3.WriteUintToMem(vform, i, addr3);
+ dst4.WriteUintToMem(vform, i, addr4);
+ addr += 4 * esize;
+ addr2 += 4 * esize;
+ addr3 += 4 * esize;
+ addr4 += 4 * esize;
+ }
+}
+
+
+void Simulator::st4(VectorFormat vform,
+ LogicVRegister dst,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr) {
+ if (handle_wasm_seg_fault(addr, LaneSizeInBytesFromFormat(vform)*4))
+ return;
+ int esize = LaneSizeInBytesFromFormat(vform);
+ dst.WriteUintToMem(vform, index, addr);
+ dst2.WriteUintToMem(vform, index, addr + 1 * esize);
+ dst3.WriteUintToMem(vform, index, addr + 2 * esize);
+ dst4.WriteUintToMem(vform, index, addr + 3 * esize);
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = src1.Int(vform, i);
+ int64_t sb = src2.Int(vform, i);
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ bool result = false;
+ switch (cond) {
+ case eq: result = (ua == ub); break;
+ case ge: result = (sa >= sb); break;
+ case gt: result = (sa > sb) ; break;
+ case hi: result = (ua > ub) ; break;
+ case hs: result = (ua >= ub); break;
+ case lt: result = (sa < sb) ; break;
+ case le: result = (sa <= sb); break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond) {
+ SimVRegister temp;
+ LogicVRegister imm_reg = dup_immediate(vform, temp, imm);
+ return cmp(vform, dst, src1, imm_reg, cond);
+}
+
+
+LogicVRegister Simulator::cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = src1.Uint(vform, i);
+ uint64_t ub = src2.Uint(vform, i);
+ dst.SetUint(vform, i, ((ua & ub) != 0) ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ // TODO(all): consider assigning the result of LaneCountFromFormat to a local.
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ uint64_t ua = src1.UintLeftJustified(vform, i);
+ uint64_t ub = src2.UintLeftJustified(vform, i);
+ uint64_t ur = ua + ub;
+ if (ur < ua) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa + sb;
+ // If the signs of the operands are the same, but different from the result,
+ // there was an overflow.
+ if (((sa >= 0) == (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sa >= 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) + src2.Int(vform, i));
+ }
+ return dst;
+}
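+// Example (illustrative) of the saturation test above: in an 8-bit lane,
+// 0x70 + 0x20 left-justifies to 0x70... + 0x20... in 64 bits; the sum has its
+// top bit set and so reads as negative, while both operands were non-negative,
+// so the lane is flagged for positive signed saturation. The wrapped value is
+// still written; a caller that wants saturating semantics applies
+// SignedSaturate() or UnsignedSaturate() to the returned register, as sqshl
+// and uqshl below do for shifts.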
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uzp1(vform, temp1, src1, src2);
+ uzp2(vform, temp2, src1, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ mul(vform, temp, src1, src2);
+ sub(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) * src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mul(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mla(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return mls(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return smlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return umlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmull2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlal2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform =
+ VectorFormatHalfWidthDoubleLanes(VectorFormatFillQ(vform));
+ return sqdmlsl2(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ SimVRegister temp;
+ VectorFormat indexform = VectorFormatFillQ(vform);
+ return sqrdmulh(vform, dst, src1, dup_element(indexform, temp, src2, index));
+}
+
+
+uint16_t Simulator::PolynomialMult(uint8_t op1, uint8_t op2) {
+ uint16_t result = 0;
+ uint16_t extended_op2 = op2;
+ for (int i = 0; i < 8; ++i) {
+ if ((op1 >> i) & 1) {
+ result = result ^ (extended_op2 << i);
+ }
+ }
+ return result;
+}
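+// Example (illustrative): PolynomialMult(0x05, 0x03) performs a carry-less
+// (GF(2) polynomial) multiplication: bits 0 and 2 of op1 are set, so the
+// result is 0x03 ^ (0x03 << 2) = 0x0F, matching
+// (x^2 + 1)(x + 1) = x^3 + x^2 + x + 1 over GF(2).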
+
+
+LogicVRegister Simulator::pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i,
+ PolynomialMult(src1.Uint(vform, i), src2.Uint(vform, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidth(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, i),
+ src2.Uint(vform_src, i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::pmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ VectorFormat vform_src = VectorFormatHalfWidthDoubleLanes(vform);
+ dst.ClearForWrite(vform);
+ int lane_count = LaneCountFromFormat(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, PolynomialMult(src1.Uint(vform_src, lane_count + i),
+ src2.Uint(vform_src, lane_count + i)));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for unsigned saturation.
+ if (src2.Uint(vform, i) > src1.Uint(vform, i)) {
+ dst.SetUnsignedSat(i, false);
+ }
+
+ // Test for signed saturation.
+ int64_t sa = src1.IntLeftJustified(vform, i);
+ int64_t sb = src2.IntLeftJustified(vform, i);
+ int64_t sr = sa - sb;
+ // If the signs of the operands are different, and the sign of the first
+ // operand doesn't match the result, there was an overflow.
+ if (((sa >= 0) != (sb >= 0)) && ((sa >= 0) != (sr >= 0))) {
+ dst.SetSignedSat(i, sr < 0);
+ }
+
+ dst.SetInt(vform, i, src1.Int(vform, i) - src2.Int(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) | ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) ^ src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src1.Uint(vform, i) & ~src2.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) & ~imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = ~src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
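+// Note (explanatory): bif above and bit/bsl below all evaluate the same
+// bitwise-select expression, result = op1 ^ ((op1 ^ op3) & op2): wherever a
+// bit of op2 is set the result takes that bit from op3, otherwise it keeps
+// the bit from op1. The three instructions differ only in which of dst, src1
+// and src2 (possibly inverted) plays each role.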
+
+
+LogicVRegister Simulator::bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = dst.Uint(vform, i);
+ uint64_t operand2 = src2.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t operand1 = src2.Uint(vform, i);
+ uint64_t operand2 = dst.Uint(vform, i);
+ uint64_t operand3 = src1.Uint(vform, i);
+ uint64_t result = operand1 ^ ((operand1 ^ operand3) & operand2);
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src1_val = src1.Int(vform, i);
+ int64_t src2_val = src2.Int(vform, i);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ int64_t src1_val = src.Int(vform, i);
+ int64_t src2_val = src.Int(vform, i + 1);
+ int64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetInt(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, true);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ sminmaxp(vform, dst, 0, src1, false);
+ sminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(vform == kFormatD);
+
+ int64_t dst_val = src.Int(kFormat2D, 0) + src.Int(kFormat2D, 1);
+ dst.ClearForWrite(vform);
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform));
+
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ int64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Int(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetInt(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_dst
+ = ScalarFormatFromLaneSize(LaneSizeInBitsFromFormat(vform) * 2);
+
+ uint64_t dst_val = 0;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst_val += src.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform_dst);
+ dst.SetUint(vform_dst, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ int64_t dst_val = max ? INT64_MIN : INT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t src_val = src.Int(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, 0);
+ }
+ dst.SetInt(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ sminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src1_val = src1.Uint(vform, i);
+ uint64_t src2_val = src2.Uint(vform, i);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, i, dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, true);
+}
+
+
+LogicVRegister Simulator::umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return uminmax(vform, dst, src1, src2, false);
+}
+
+
+LogicVRegister Simulator::uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i += 2) {
+ uint64_t src1_val = src.Uint(vform, i);
+ uint64_t src2_val = src.Uint(vform, i + 1);
+ uint64_t dst_val;
+ if (max == true) {
+ dst_val = (src1_val > src2_val) ? src1_val : src2_val;
+ } else {
+ dst_val = (src1_val < src2_val) ? src1_val : src2_val;
+ }
+ dst.SetUint(vform, dst_index + (i >> 1), dst_val);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, true);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ uminmaxp(vform, dst, 0, src1, false);
+ uminmaxp(vform, dst, LaneCountFromFormat(vform) >> 1, src2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max) {
+ dst.ClearForWrite(vform);
+ uint64_t dst_val = max ? 0 : UINT64_MAX;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t src_val = src.Uint(vform, i);
+ if (max == true) {
+ dst_val = (src_val > dst_val) ? src_val : dst_val;
+ } else {
+ dst_val = (src_val < dst_val) ? src_val : dst_val;
+ }
+ }
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, 0);
+ }
+ dst.SetUint(vform, 0, dst_val);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uminmaxv(vform, dst, src, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = sxtl2(vform, temp2, src);
+ return sshl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int shift = LaneSizeInBitsFromFormat(vform) / 2;
+ return sshll2(vform, dst, src, shift);
+}
+
+
+LogicVRegister Simulator::ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp1, temp2;
+ LogicVRegister shiftreg = dup_immediate(vform, temp1, shift);
+ LogicVRegister extendedreg = uxtl2(vform, temp2, src);
+ return ushl(vform, dst, extendedreg, shiftreg);
+}
+
+
+LogicVRegister Simulator::sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted = src_lane << shift;
+ uint64_t mask = MaxUintFromFormat(vform) << shift;
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
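+// Example (illustrative): for an 8-bit lane with dst = 0xAB, src = 0xFF and
+// shift = 4, the mask is 0xF0 within the lane, so the result is
+// (0xAB & 0x0F) | (0xFF << 4) truncated to the lane, i.e. 0xFB: SLI shifts
+// the source left and inserts it over the destination's high bits while
+// preserving the low bits.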
+
+
+LogicVRegister Simulator::sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return ushl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, shift);
+ return sshl(vform, dst, src, shiftreg).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ dst.ClearForWrite(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ VIXL_ASSERT((shift > 0) &&
+ (shift <= static_cast<int>(LaneSizeInBitsFromFormat(vform))));
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t src_lane = src.Uint(vform, i);
+ uint64_t dst_lane = dst.Uint(vform, i);
+ uint64_t shifted;
+ uint64_t mask;
+ if (shift == 64) {
+ shifted = 0;
+ mask = 0;
+ } else {
+ shifted = src_lane >> shift;
+ mask = MaxUintFromFormat(vform) >> shift;
+ }
+ dst.SetUint(vform, i, (dst_lane & ~mask) | shifted);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return ushl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ VIXL_ASSERT(shift >= 0);
+ SimVRegister temp;
+ LogicVRegister shiftreg = dup_immediate(vform, temp, -shift);
+ return sshl(vform, dst, src, shiftreg);
+}
+
+
+LogicVRegister Simulator::ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = sshr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ LogicVRegister shifted_reg = ushr(vform, temp, src, shift).Round(vform);
+ return add(vform, dst, dst, shifted_reg);
+}
+
+
+LogicVRegister Simulator::cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingSignBits(src.Int(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ result[i] = CountLeadingZeros(src.Uint(vform, i), laneSizeInBits);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; i++) {
+ uint64_t value = src.Uint(vform, i);
+ result[i] = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ result[i] += (value & 1);
+ value >>= 1;
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ int64_t lj_src_val = src1.IntLeftJustified(vform, i);
+
+ // Set signed saturation state.
+ if ((shift_val > CountLeadingSignBits(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetSignedSat(i, lj_src_val >= 0);
+ }
+
+ // Set unsigned saturation state.
+ if (lj_src_val < 0) {
+ dst.SetUnsignedSat(i, false);
+ } else if ((shift_val > CountLeadingZeros(lj_src_val)) &&
+ (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ int64_t src_val = src1.Int(vform, i);
+ if (shift_val > 63) {
+ dst.SetInt(vform, i, 0);
+ } else if (shift_val < -63) {
+ dst.SetRounding(i, src_val < 0);
+ dst.SetInt(vform, i, (src_val < 0) ? -1 : 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+ src_val >>= -shift_val;
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetInt(vform, i, src_val);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int8_t shift_val = src2.Int(vform, i);
+ uint64_t lj_src_val = src1.UintLeftJustified(vform, i);
+
+ // Set saturation state.
+ if ((shift_val > CountLeadingZeros(lj_src_val)) && (lj_src_val != 0)) {
+ dst.SetUnsignedSat(i, true);
+ }
+
+ uint64_t src_val = src1.Uint(vform, i);
+ if ((shift_val > 63) || (shift_val < -64)) {
+ dst.SetUint(vform, i, 0);
+ } else {
+ if (shift_val < 0) {
+ // Set rounding state. Rounding only needed on right shifts.
+ if (((src_val >> (-shift_val - 1)) & 1) == 1) {
+ dst.SetRounding(i, true);
+ }
+
+ if (shift_val == -64) {
+ src_val = 0;
+ } else {
+ src_val >>= -shift_val;
+ }
+ } else {
+ src_val <<= shift_val;
+ }
+ dst.SetUint(vform, i, src_val);
+ }
+ }
+ return dst;
+}
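+// Note (explanatory): for sshl and ushl a positive element in src2 shifts the
+// corresponding src1 lane left and a negative element shifts it right; the
+// last bit shifted out on a right shift is latched with SetRounding(), which
+// is how the .Round(vform) calls in srsra/ursra above recover the rounding
+// increment.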
+
+
+LogicVRegister Simulator::neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ dst.SetInt(vform, i, -sa);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t sa = dst.IntLeftJustified(vform, i);
+ uint64_t ub = src.UintLeftJustified(vform, i);
+ int64_t sr = sa + ub;
+
+ if (sr < sa) { // Test for signed positive saturation.
+ dst.SetInt(vform, i, MaxIntFromFormat(vform));
+ } else {
+ dst.SetInt(vform, i, dst.Int(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t ua = dst.UintLeftJustified(vform, i);
+ int64_t sb = src.IntLeftJustified(vform, i);
+ uint64_t ur = ua + sb;
+
+ if ((sb > 0) && (ur <= ua)) {
+ dst.SetUint(vform, i, MaxUintFromFormat(vform)); // Positive saturation.
+ } else if ((sb < 0) && (ur >= ua)) {
+ dst.SetUint(vform, i, 0); // Negative saturation.
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + src.Int(vform, i));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ // Test for signed saturation.
+ int64_t sa = src.Int(vform, i);
+ if (sa == MinIntFromFormat(vform)) {
+ dst.SetSignedSat(i, true);
+ }
+ if (sa < 0) {
+ dst.SetInt(vform, i, -sa);
+ } else {
+ dst.SetInt(vform, i, sa);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::extractnarrow(VectorFormat dstform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned) {
+ bool upperhalf = false;
+ VectorFormat srcform = kFormatUndefined;
+ int64_t ssrc[8];
+ uint64_t usrc[8];
+
+ switch (dstform) {
+ case kFormat8B : upperhalf = false; srcform = kFormat8H; break;
+ case kFormat16B: upperhalf = true; srcform = kFormat8H; break;
+ case kFormat4H : upperhalf = false; srcform = kFormat4S; break;
+ case kFormat8H : upperhalf = true; srcform = kFormat4S; break;
+ case kFormat2S : upperhalf = false; srcform = kFormat2D; break;
+ case kFormat4S : upperhalf = true; srcform = kFormat2D; break;
+ case kFormatB : upperhalf = false; srcform = kFormatH; break;
+ case kFormatH : upperhalf = false; srcform = kFormatS; break;
+ case kFormatS : upperhalf = false; srcform = kFormatD; break;
+    default: VIXL_UNIMPLEMENTED();
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ ssrc[i] = src.Int(srcform, i);
+ usrc[i] = src.Uint(srcform, i);
+ }
+
+ int offset;
+ if (upperhalf) {
+ offset = LaneCountFromFormat(dstform) / 2;
+ } else {
+ offset = 0;
+ dst.ClearForWrite(dstform);
+ }
+
+ for (int i = 0; i < LaneCountFromFormat(srcform); i++) {
+ // Test for signed saturation
+ if (ssrc[i] > MaxIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, true);
+ } else if (ssrc[i] < MinIntFromFormat(dstform)) {
+ dst.SetSignedSat(offset + i, false);
+ }
+
+ // Test for unsigned saturation
+ if (srcIsSigned) {
+ if (ssrc[i] > static_cast<int64_t>(MaxUintFromFormat(dstform))) {
+ dst.SetUnsignedSat(offset + i, true);
+ } else if (ssrc[i] < 0) {
+ dst.SetUnsignedSat(offset + i, false);
+ }
+ } else {
+ if (usrc[i] > MaxUintFromFormat(dstform)) {
+ dst.SetUnsignedSat(offset + i, true);
+ }
+ }
+
+ int64_t result;
+ if (srcIsSigned) {
+ result = ssrc[i] & MaxUintFromFormat(dstform);
+ } else {
+ result = usrc[i] & MaxUintFromFormat(dstform);
+ }
+
+ if (dstIsSigned) {
+ dst.SetInt(dstform, offset + i, result);
+ } else {
+ dst.SetUint(dstform, offset + i, result);
+ }
+ }
+ return dst;
+}
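+// Note (explanatory): extractnarrow is the shared helper for the narrowing
+// moves below: xtn keeps only the low bits of each source lane, while sqxtn,
+// sqxtun and uqxtn reuse the saturation flags recorded here and clamp the
+// flagged lanes via SignedSaturate() or UnsignedSaturate().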
+
+
+LogicVRegister Simulator::xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true);
+}
+
+
+LogicVRegister Simulator::sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, true, src, true).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, true).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return extractnarrow(vform, dst, false, src, false).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (issigned) {
+ int64_t sr = src1.Int(vform, i) - src2.Int(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetInt(vform, i, sr);
+ } else {
+ int64_t sr = src1.Uint(vform, i) - src2.Uint(vform, i);
+ sr = sr > 0 ? sr : -sr;
+ dst.SetUint(vform, i, sr);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, true);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ dst.ClearForWrite(vform);
+ absdiff(vform, temp, src1, src2, false);
+ add(vform, dst, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, ~src.Uint(vform, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSizeInBits = LaneSizeInBitsFromFormat(vform);
+ uint64_t reversed_value;
+ uint64_t value;
+ for (int i = 0; i < laneCount; i++) {
+ value = src.Uint(vform, i);
+ reversed_value = 0;
+ for (int j = 0; j < laneSizeInBits; j++) {
+ reversed_value = (reversed_value << 1) | (value & 1);
+ value >>= 1;
+ }
+ result[i] = reversed_value;
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int laneSize = LaneSizeInBytesFromFormat(vform);
+ int lanesPerLoop = revSize / laneSize;
+ for (int i = 0; i < laneCount; i += lanesPerLoop) {
+ for (int j = 0; j < lanesPerLoop; j++) {
+ result[i + lanesPerLoop - 1 - j] = src.Uint(vform, i + j);
+ }
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 2);
+}
+
+
+LogicVRegister Simulator::rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 4);
+}
+
+
+LogicVRegister Simulator::rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return rev(vform, dst, src, 8);
+}
+
+
+LogicVRegister Simulator::addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate) {
+ VectorFormat vformsrc = VectorFormatHalfWidthDoubleLanes(vform);
+
+ int64_t sr[16];
+ uint64_t ur[16];
+
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (is_signed) {
+ sr[i] = src.Int(vformsrc, 2 * i) + src.Int(vformsrc, 2 * i + 1);
+ } else {
+ ur[i] = src.Uint(vformsrc, 2 * i) + src.Uint(vformsrc, 2 * i + 1);
+ }
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ if (do_accumulate) {
+ if (is_signed) {
+ dst.SetInt(vform, i, dst.Int(vform, i) + sr[i]);
+ } else {
+ dst.SetUint(vform, i, dst.Uint(vform, i) + ur[i]);
+ }
+ } else {
+ if (is_signed) {
+ dst.SetInt(vform, i, sr[i]);
+ } else {
+ dst.SetUint(vform, i, ur[i]);
+ }
+ }
+ }
+
+ return dst;
+}
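+// Note (explanatory): addlp adds adjacent pairs of half-width source lanes
+// into each destination lane; saddlp/uaddlp below use it directly, while
+// sadalp/uadalp also accumulate the pair sums into the existing destination.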
+
+
+LogicVRegister Simulator::saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, false);
+}
+
+
+LogicVRegister Simulator::uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, false);
+}
+
+
+LogicVRegister Simulator::sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, true, true);
+}
+
+
+LogicVRegister Simulator::uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return addlp(vform, dst, src, false, true);
+}
+
+
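+// EXT: the low (laneCount - index) destination lanes are taken from src1
+// starting at lane `index`; the remaining lanes come from the low lanes of
+// src2.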
+LogicVRegister Simulator::ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ uint8_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount - index; ++i) {
+ result[i] = src1.Uint(vform, i + index);
+ }
+ for (int i = 0; i < index; ++i) {
+ result[laneCount - index + i] = src2.Uint(vform, i);
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = src.Uint(vform, src_index);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, value);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index) {
+ dst.SetUint(vform, dst_index, src.Uint(vform, src_index));
+ return dst;
+}
+
+
+LogicVRegister Simulator::ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm) {
+ uint64_t value = imm & MaxUintFromFormat(vform);
+ dst.SetUint(vform, dst_index, value);
+ return dst;
+}
+
+
+LogicVRegister Simulator::mov(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int lane = 0; lane < LaneCountFromFormat(vform); lane++) {
+ dst.SetUint(vform, lane, src.Uint(vform, lane));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm) {
+ int laneCount = LaneCountFromFormat(vform);
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, ~imm);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src.Uint(vform, i) | imm;
+ }
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetUint(vform, i, src.Uint(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VectorFormat vform_half = VectorFormatHalfWidth(vform);
+ int lane_count = LaneCountFromFormat(vform);
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetInt(vform, i, src.Int(vform_half, lane_count + i));
+ }
+ return dst;
+}
+
+
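+// Shift-right-and-narrow: perform the shift at double the destination lane
+// width, then narrow each result; the 'r' variants round before narrowing.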
+LogicVRegister Simulator::shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vform_src = VectorFormatDoubleWidth(vform);
+ VectorFormat vform_dst = vform;
+ LogicVRegister shifted_src = ushr(vform_src, temp, src, shift);
+ return extractnarrow(vform_dst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
+LogicVRegister Simulator::rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = ushr(vformsrc, temp, src, shift).Round(vformsrc);
+ return extractnarrow(vformdst, dst, false, shifted_src, false);
+}
+
+
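+// TBL zeroes lanes whose index is out of range, so it is implemented as TBX
+// into a zeroed temporary which is then copied to dst.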
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, tab3, ind);
+ return orr(vform, dst, result, result);
+}
+
+
+LogicVRegister Simulator::tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ SimVRegister result;
+ movi(vform, result, 0);
+ tbx(vform, result, tab, tab2, tab3, tab4, ind);
+ return orr(vform, dst, result, result);
+}
+
+
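+// TBX leaves a destination lane unchanged when its index selects beyond the
+// available table registers.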
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t j = ind.Uint(vform, i);
+ switch (j >> 4) {
+ case 0: dst.SetUint(vform, i, tab.Uint(kFormat16B, j & 15)); break;
+ case 1: dst.SetUint(vform, i, tab2.Uint(kFormat16B, j & 15)); break;
+ case 2: dst.SetUint(vform, i, tab3.Uint(kFormat16B, j & 15)); break;
+ case 3: dst.SetUint(vform, i, tab4.Uint(kFormat16B, j & 15)); break;
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return shrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ return rshrn2(vform, dst, src, shift).UnsignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtn(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(vform);
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
+LogicVRegister Simulator::sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift) {
+ SimVRegister temp;
+ VectorFormat vformsrc = VectorFormatDoubleWidth(VectorFormatHalfLanes(vform));
+ VectorFormat vformdst = vform;
+ LogicVRegister shifted_src = sshr(vformsrc, temp, src, shift).Round(vformsrc);
+ return sqxtun(vformdst, dst, shifted_src);
+}
+
+
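+// Widening arithmetic: the narrow operands are zero- or sign-extended to
+// double the lane width before the base operation; the '2' forms use the
+// high half of the narrow source, and the 'w' forms widen only src2.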
+LogicVRegister Simulator::uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ add(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ add(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ uxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ sub(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sxtl2(vform, temp, src2);
+ sub(vform, dst, src1, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ uaba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ saba(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::uabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, false);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sabdl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ absdiff(vform, dst, temp1, temp2, true);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mul(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mls(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl(vform, temp1, src1);
+ uxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ uxtl2(vform, temp1, src1);
+ uxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl(vform, temp1, src1);
+ sxtl(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp1, temp2;
+ sxtl2(vform, temp1, src1);
+ sxtl2(vform, temp2, src2);
+ mla(vform, dst, temp1, temp2);
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return add(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = sqdmull2(vform, temp, src1, src2);
+ return sub(vform, dst, dst, product).SignedSaturate(vform);
+}
+
+
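+// Signed saturating doubling multiply long: widen and multiply, then double
+// the product (by adding it to itself) with saturation.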
+LogicVRegister Simulator::sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = smull2(vform, temp, src1, src2);
+ return add(vform, dst, product, product).SignedSaturate(vform);
+}
+
+
+LogicVRegister Simulator::sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round) {
+  // 2 * INT_32_MIN * INT_32_MIN causes int64_t to overflow.
+  // To avoid this, we use ((src1 * src2) + (1 << (esize - 2))) >> (esize - 1),
+  // which is the same as ((2 * src1 * src2) + (1 << (esize - 1))) >> esize.
+
+ int esize = LaneSizeInBitsFromFormat(vform);
+ int round_const = round ? (1 << (esize - 2)) : 0;
+ int64_t product;
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ product = src1.Int(vform, i) * src2.Int(vform, i);
+ product += round_const;
+ product = product >> (esize - 1);
+
+ if (product > MaxIntFromFormat(vform)) {
+ product = MaxIntFromFormat(vform);
+ } else if (product < MinIntFromFormat(vform)) {
+ product = MinIntFromFormat(vform);
+ }
+ dst.SetInt(vform, i, product);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ return sqrdmulh(vform, dst, src1, src2, false);
+}
+
+
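+// Add/subtract and narrow high: compute at double width, then keep the high
+// half of each lane by shifting right by the lane size; the 'r' forms round.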
+LogicVRegister Simulator::addhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::addhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::raddhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ add(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ shrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::subhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ shrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(vform), temp, src1, src2);
+ rshrn(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
+LogicVRegister Simulator::rsubhn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ sub(VectorFormatDoubleWidth(VectorFormatHalfLanes(vform)), temp, src1, src2);
+ rshrn2(vform, dst, temp, LaneSizeInBitsFromFormat(vform));
+ return dst;
+}
+
+
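+// Permutes: TRN1/TRN2 interleave the even/odd lanes of the two sources,
+// ZIP1/ZIP2 interleave their low/high halves, and UZP1/UZP2 de-interleave
+// the even/odd lanes of the concatenated sources.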
+LogicVRegister Simulator::trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, 2 * i);
+ result[(2 * i) + 1] = src2.Uint(vform, 2 * i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, (2 * i) + 1);
+ result[(2 * i) + 1] = src2.Uint(vform, (2 * i) + 1);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, i);
+ result[(2 * i) + 1] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[16];
+ int laneCount = LaneCountFromFormat(vform);
+ int pairs = laneCount / 2;
+ for (int i = 0; i < pairs; ++i) {
+ result[2 * i] = src1.Uint(vform, pairs + i);
+ result[(2 * i) + 1] = src2.Uint(vform, pairs + i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ dst.SetUint(vform, i, result[2 * i]);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ uint64_t result[32];
+ int laneCount = LaneCountFromFormat(vform);
+ for (int i = 0; i < laneCount; ++i) {
+ result[i] = src1.Uint(vform, i);
+ result[laneCount + i] = src2.Uint(vform, i);
+ }
+
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < laneCount; ++i) {
+    dst.SetUint(vform, i, result[(2 * i) + 1]);
+ }
+ return dst;
+}
+
+
+template <typename T>
+T Simulator::FPAdd(T op1, T op2) {
+ T result = FPProcessNaNs(op1, op2);
+ if (std::isnan(result)) return result;
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
+ // inf + -inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 + op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSub(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
+ // inf - inf returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 - op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMul(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ // Other cases should be handled by standard arithmetic.
+ return op1 * op2;
+ }
+}
+
+
+template<typename T>
+T Simulator::FPMulx(T op1, T op2) {
+ if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
+ // inf * 0.0 returns +/-2.0.
+ T two = 2.0;
+ return copysign(1.0, op1) * copysign(1.0, op2) * two;
+ }
+ return FPMul(op1, op2);
+}
+
+
+template<typename T>
+T Simulator::FPMulAdd(T a, T op1, T op2) {
+ T result = FPProcessNaNs3(a, op1, op2);
+
+ T sign_a = copysign(1.0, a);
+ T sign_prod = copysign(1.0, op1) * copysign(1.0, op2);
+ bool isinf_prod = std::isinf(op1) || std::isinf(op2);
+ bool operation_generates_nan =
+ (std::isinf(op1) && (op2 == 0.0)) || // inf * 0.0
+ (std::isinf(op2) && (op1 == 0.0)) || // 0.0 * inf
+ (std::isinf(a) && isinf_prod && (sign_a != sign_prod)); // inf - inf
+
+ if (std::isnan(result)) {
+ // Generated NaNs override quiet NaNs propagated from a.
+ if (operation_generates_nan && IsQuietNaN(a)) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return result;
+ }
+ }
+
+ // If the operation would produce a NaN, return the default NaN.
+ if (operation_generates_nan) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ }
+
+ // Work around broken fma implementations for exact zero results: The sign of
+ // exact 0.0 results is positive unless both a and op1 * op2 are negative.
+ if (((op1 == 0.0) || (op2 == 0.0)) && (a == 0.0)) {
+ return ((sign_a < 0) && (sign_prod < 0)) ? -0.0 : 0.0;
+ }
+
+ result = FusedMultiplyAdd(op1, op2, a);
+ VIXL_ASSERT(!std::isnan(result));
+
+ // Work around broken fma implementations for rounded zero results: If a is
+ // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
+ if ((a == 0.0) && (result == 0.0)) {
+ return copysign(0.0, sign_prod);
+ }
+
+ return result;
+}
+
+
+template <typename T>
+T Simulator::FPDiv(T op1, T op2) {
+ // NaNs should be handled elsewhere.
+ VIXL_ASSERT(!std::isnan(op1) && !std::isnan(op2));
+
+ if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
+ // inf / inf and 0.0 / 0.0 return the default NaN.
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ if (op2 == 0.0) FPProcessException();
+
+ // Other cases should be handled by standard arithmetic.
+ return op1 / op2;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPSqrt(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else {
+ return sqrt(op);
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMax(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return +0.0.
+ return 0.0;
+ } else {
+ return (a > b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMaxNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64NegativeInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64NegativeInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMax(a, b);
+}
+
+
+template <typename T>
+T Simulator::FPMin(T a, T b) {
+ T result = FPProcessNaNs(a, b);
+ if (std::isnan(result)) return result;
+
+ if ((a == 0.0) && (b == 0.0) &&
+ (copysign(1.0, a) != copysign(1.0, b))) {
+ // a and b are zero, and the sign differs: return -0.0.
+ return -0.0;
+ } else {
+ return (a < b) ? a : b;
+ }
+}
+
+
+template <typename T>
+T Simulator::FPMinNM(T a, T b) {
+ if (IsQuietNaN(a) && !IsQuietNaN(b)) {
+ a = kFP64PositiveInfinity;
+ } else if (!IsQuietNaN(a) && IsQuietNaN(b)) {
+ b = kFP64PositiveInfinity;
+ }
+
+ T result = FPProcessNaNs(a, b);
+ return std::isnan(result) ? result : FPMin(a, b);
+}
+
+
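+// FRECPS step: computes op1 * op2 + 2.0. Callers negate one operand, so the
+// per-lane result is 2.0 - a * b.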
+template <typename T>
+T Simulator::FPRecipStepFused(T op1, T op2) {
+ const T two = 2.0;
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return two;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ return FusedMultiplyAdd(op1, op2, two);
+ }
+}
+
+
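+// FRSQRTS step: computes op1 * op2 / 2 + 1.5. Callers negate one operand, so
+// the per-lane result is (3.0 - a * b) / 2.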
+template <typename T>
+T Simulator::FPRSqrtStepFused(T op1, T op2) {
+ const T one_point_five = 1.5;
+ const T two = 2.0;
+
+ if ((std::isinf(op1) && (op2 == 0.0))
+ || ((op1 == 0.0) && (std::isinf(op2)))) {
+ return one_point_five;
+ } else if (std::isinf(op1) || std::isinf(op2)) {
+ // Return +inf if signs match, otherwise -inf.
+ return ((op1 >= 0.0) == (op2 >= 0.0)) ? kFP64PositiveInfinity
+ : kFP64NegativeInfinity;
+ } else {
+ // The multiply-add-halve operation must be fully fused, so avoid interim
+ // rounding by checking which operand can be losslessly divided by two
+ // before doing the multiply-add.
+ if (std::isnormal(op1 / two)) {
+ return FusedMultiplyAdd(op1 / two, op2, one_point_five);
+ } else if (std::isnormal(op2 / two)) {
+ return FusedMultiplyAdd(op1, op2 / two, one_point_five);
+ } else {
+ // Neither operand is normal after halving: the result is dominated by
+ // the addition term, so just return that.
+ return one_point_five;
+ }
+ }
+}
+
+int32_t Simulator::FPToFixedJS(double value) {
+ // The Z-flag is set when the conversion from double precision floating-point
+ // to 32-bit integer is exact. If the source value is +/-Infinity, -0.0, NaN,
+ // outside the bounds of a 32-bit integer, or isn't an exact integer then the
+ // Z-flag is unset.
+ int Z = 1;
+ int32_t result;
+
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+    // +/-zero and infinity all return zero; however, -0.0 and +/-infinity
+    // also unset the Z-flag.
+    result = 0;
+ if ((value != 0.0) || std::signbit(value)) {
+ Z = 0;
+ }
+ } else if (std::isnan(value)) {
+ // NaN values unset the Z-flag and set the result to 0.
+ FPProcessNaN(value);
+ result = 0;
+ Z = 0;
+ } else {
+ // All other values are converted to an integer representation, rounded
+ // toward zero.
+ double int_result = std::floor(value);
+ double error = value - int_result;
+
+ if ((error != 0.0) && (int_result < 0.0)) {
+ int_result++;
+ }
+
+ // Constrain the value into the range [INT32_MIN, INT32_MAX]. We can almost
+ // write a one-liner with std::round, but the behaviour on ties is incorrect
+ // for our purposes.
+ double mod_const = static_cast<double>(UINT64_C(1) << 32);
+ double mod_error =
+ (int_result / mod_const) - std::floor(int_result / mod_const);
+ double constrained;
+ if (mod_error == 0.5) {
+ constrained = INT32_MIN;
+ } else {
+ constrained = int_result - mod_const * round(int_result / mod_const);
+ }
+
+ VIXL_ASSERT(std::floor(constrained) == constrained);
+ VIXL_ASSERT(constrained >= INT32_MIN);
+ VIXL_ASSERT(constrained <= INT32_MAX);
+
+ // Take the bottom 32 bits of the result as a 32-bit integer.
+ result = static_cast<int32_t>(constrained);
+
+ if ((int_result < INT32_MIN) || (int_result > INT32_MAX) ||
+ (error != 0.0)) {
+ // If the integer result is out of range or the conversion isn't exact,
+ // take exception and unset the Z-flag.
+ FPProcessException();
+ Z = 0;
+ }
+ }
+
+ ReadNzcv().SetN(0);
+ ReadNzcv().SetZ(Z);
+ ReadNzcv().SetC(0);
+ ReadNzcv().SetV(0);
+
+ return result;
+}
+
+
+double Simulator::FPRoundInt(double value, FPRounding round_mode) {
+ if ((value == 0.0) || (value == kFP64PositiveInfinity) ||
+ (value == kFP64NegativeInfinity)) {
+ return value;
+ } else if (std::isnan(value)) {
+ return FPProcessNaN(value);
+ }
+
+ double int_result = std::floor(value);
+ double error = value - int_result;
+ switch (round_mode) {
+ case FPTieAway: {
+ // Take care of correctly handling the range ]-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ } else if ((error > 0.5) || ((error == 0.5) && (int_result >= 0.0))) {
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is positive, round up.
+ int_result++;
+ }
+ break;
+ }
+ case FPTieEven: {
+ // Take care of correctly handling the range [-0.5, -0.0], which must
+ // yield -0.0.
+ if ((-0.5 <= value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is greater than 0.5, or is equal to 0.5 and the integer
+ // result is odd, round up.
+ } else if ((error > 0.5) ||
+ ((error == 0.5) && (std::fmod(int_result, 2) != 0))) {
+ int_result++;
+ }
+ break;
+ }
+ case FPZero: {
+      // If value > 0, take floor(value); otherwise take ceil(value).
+ if (value < 0) {
+ int_result = ceil(value);
+ }
+ break;
+ }
+ case FPNegativeInfinity: {
+ // We always use floor(value).
+ break;
+ }
+ case FPPositiveInfinity: {
+ // Take care of correctly handling the range ]-1.0, -0.0], which must
+ // yield -0.0.
+ if ((-1.0 < value) && (value < 0.0)) {
+ int_result = -0.0;
+
+ // If the error is non-zero, round up.
+ } else if (error > 0.0) {
+ int_result++;
+ }
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+ return int_result;
+}
+
+
+int32_t Simulator::FPToInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxInt) {
+ return kWMaxInt;
+ } else if (value < kWMinInt) {
+ return kWMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int32_t>(value);
+}
+
+
+int64_t Simulator::FPToInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ // The compiler would have to round kXMaxInt, triggering a warning. Compare
+ // against the largest int64_t that is exactly representable as a double.
+ if (value > kXMaxExactInt) {
+ return kXMaxInt;
+ } else if (value < kXMinInt) {
+ return kXMinInt;
+ }
+ return std::isnan(value) ? 0 : static_cast<int64_t>(value);
+}
+
+
+uint32_t Simulator::FPToUInt32(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ if (value >= kWMaxUInt) {
+ return kWMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint32_t>(value);
+}
+
+
+uint64_t Simulator::FPToUInt64(double value, FPRounding rmode) {
+ value = FPRoundInt(value, rmode);
+ // The compiler would have to round kXMaxUInt, triggering a warning. Compare
+ // against the largest uint64_t that is exactly representable as a double.
+ if (value > kXMaxExactUInt) {
+ return kXMaxUInt;
+ } else if (value < 0.0) {
+ return 0;
+ }
+ return std::isnan(value) ? 0 : static_cast<uint64_t>(value);
+}
+
+
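+// For each FP 'three-same' operation, define a per-lane template over float
+// and double, plus a wrapper that dispatches on the lane size of vform.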
+#define DEFINE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+template <typename T> \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ dst.ClearForWrite(vform); \
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) { \
+ T op1 = src1.Float<T>(i); \
+ T op2 = src2.Float<T>(i); \
+ T result; \
+ if (PROCNAN) { \
+ result = FPProcessNaNs(op1, op2); \
+ if (!std::isnan(result)) { \
+ result = OP(op1, op2); \
+ } \
+ } else { \
+ result = OP(op1, op2); \
+ } \
+ dst.SetFloat(i, result); \
+ } \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) { \
+ FN<float>(vform, dst, src1, src2); \
+ } else { \
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize); \
+ FN<double>(vform, dst, src1, src2); \
+ } \
+ return dst; \
+}
+NEON_FP3SAME_LIST(DEFINE_NEON_FP_VECTOR_OP)
+#undef DEFINE_NEON_FP_VECTOR_OP
+
+
+LogicVRegister Simulator::fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ LogicVRegister product = fmul(vform, temp, src1, src2);
+ return fneg(vform, dst, product);
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRecipStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecps<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecps<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T result = FPProcessNaNs(op1, op2);
+ dst.SetFloat(i, std::isnan(result) ? result : FPRSqrtStepFused(op1, op2));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frsqrts<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frsqrts<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ bool result = false;
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T nan_result = FPProcessNaNs(op1, op2);
+ if (!std::isnan(nan_result)) {
+ switch (cond) {
+ case eq: result = (op1 == op2); break;
+ case ge: result = (op1 >= op2); break;
+ case gt: result = (op1 > op2) ; break;
+ case le: result = (op1 <= op2); break;
+ case lt: result = (op1 < op2) ; break;
+ default: VIXL_UNREACHABLE(); break;
+ }
+ }
+ dst.SetUint(vform, i, result ? MaxUintFromFormat(vform) : 0);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fcmp<float>(vform, dst, src1, src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fcmp<double>(vform, dst, src1, src2, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond) {
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister zero_reg = dup_immediate(vform, temp, FloatToRawbits(0.0));
+ fcmp<float>(vform, dst, src, zero_reg, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister zero_reg = dup_immediate(vform, temp,
+ DoubleToRawbits(0.0));
+ fcmp<double>(vform, dst, src, zero_reg, cond);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond) {
+ SimVRegister temp1, temp2;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister abs_src1 = fabs_<float>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<float>(vform, temp2, src2);
+ fcmp<float>(vform, dst, abs_src1, abs_src2, cond);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister abs_src1 = fabs_<double>(vform, temp1, src1);
+ LogicVRegister abs_src2 = fabs_<double>(vform, temp2, src2);
+ fcmp<double>(vform, dst, abs_src1, abs_src2, cond);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmla<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmla<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op1 = -src1.Float<T>(i);
+ T op2 = src2.Float<T>(i);
+ T acc = dst.Float<T>(i);
+ T result = FPMulAdd(acc, op1, op2);
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fmls<float>(vform, dst, src1, src2);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fmls<double>(vform, dst, src1, src2);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ op = -op;
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fneg<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fneg<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+template <typename T>
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ if (copysign(1.0, op) < 0.0) {
+ op = -op;
+ }
+ dst.SetFloat(i, op);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ fabs_<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ fabs_<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2) {
+ SimVRegister temp;
+ fsub(vform, temp, src1, src2);
+ fabs_(vform, dst, temp);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float result = FPSqrt(src.Float<float>(i));
+ dst.SetFloat(i, result);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double result = FPSqrt(src.Float<double>(i));
+ dst.SetFloat(i, result);
+ }
+ }
+ return dst;
+}
+
+
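+// Pairwise FP operations: de-interleave the operands with uzp1/uzp2 and
+// apply the element-wise operation; the single-source form reduces the two
+// lanes of src to a scalar.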
+#define DEFINE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2) { \
+ SimVRegister temp1, temp2; \
+ uzp1(vform, temp1, src1, src2); \
+ uzp2(vform, temp2, src1, src2); \
+ FN(vform, dst, temp1, temp2); \
+ return dst; \
+} \
+ \
+LogicVRegister Simulator::FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src) { \
+ if (vform == kFormatS) { \
+ float result = OP(src.Float<float>(0), src.Float<float>(1)); \
+ dst.SetFloat(0, result); \
+ } else { \
+ VIXL_ASSERT(vform == kFormatD); \
+ double result = OP(src.Float<double>(0), src.Float<double>(1)); \
+ dst.SetFloat(0, result); \
+ } \
+ dst.ClearForWrite(vform); \
+ return dst; \
+}
+NEON_FPPAIRWISE_LIST(DEFINE_NEON_FP_PAIR_OP)
+#undef DEFINE_NEON_FP_PAIR_OP
+
+
+LogicVRegister Simulator::fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op) {
+ VIXL_ASSERT(vform == kFormat4S);
+ USE(vform);
+ float result1 = (this->*Op)(src.Float<float>(0), src.Float<float>(1));
+ float result2 = (this->*Op)(src.Float<float>(2), src.Float<float>(3));
+ float result = (this->*Op)(result1, result2);
+ dst.ClearForWrite(kFormatS);
+ dst.SetFloat<float>(0, result);
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMax);
+}
+
+
+LogicVRegister Simulator::fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMin);
+}
+
+
+LogicVRegister Simulator::fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMaxNM);
+}
+
+
+LogicVRegister Simulator::fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ return fminmaxv(vform, dst, src, &Simulator::FPMinNM);
+}
+
+
+LogicVRegister Simulator::fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmul<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmul<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmla<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmla<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmls<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmls<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index) {
+ dst.ClearForWrite(vform);
+ SimVRegister temp;
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ LogicVRegister index_reg = dup_element(kFormat4S, temp, src2, index);
+ fmulx<float>(vform, dst, src1, index_reg);
+
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ LogicVRegister index_reg = dup_element(kFormat2D, temp, src2, index);
+ fmulx<double>(vform, dst, src1, index_reg);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ float rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<float>(i, rounded);
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ double rounded = FPRoundInt(input, rounding_mode);
+ if (inexact_exception && !std::isnan(input) && (input != rounded)) {
+ FPProcessException();
+ }
+ dst.SetFloat<double>(i, rounded);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetInt(vform, i, FPToInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetInt(vform, i, FPToInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float op = src.Float<float>(i) * std::pow(2.0f, fbits);
+ dst.SetUint(vform, i, FPToUInt32(op, rounding_mode));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double op = src.Float<double>(i) * std::pow(2.0, fbits);
+ dst.SetUint(vform, i, FPToUInt64(op, rounding_mode));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ // TODO: Full support for SimFloat16 in SimRegister(s).
+ dst.SetFloat(i,
+ FPToFloat(RawbitsToFloat16(src.Float<uint16_t>(i)),
+ ReadDN()));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = LaneCountFromFormat(vform) - 1; i >= 0; i--) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i), ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < lane_count; i++) {
+ // TODO: Full support for SimFloat16 in SimRegister(s).
+ dst.SetFloat(i,
+ FPToFloat(RawbitsToFloat16(
+ src.Float<uint16_t>(i + lane_count)),
+ ReadDN()));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < lane_count; i++) {
+ dst.SetFloat(i, FPToDouble(src.Float<float>(i + lane_count), ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ SimVRegister tmp;
+ LogicVRegister srctmp = mov(kFormat2D, tmp, src);
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i,
+ Float16ToRawbits(FPToFloat16(srctmp.Float<float>(i),
+ FPTieEven,
+ ReadDN())));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(srctmp.Float<double>(i), FPTieEven, ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ if (LaneSizeInBitsFromFormat(vform) == kHRegSize) {
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ Float16ToRawbits(
+ FPToFloat16(src.Float<float>(i), FPTieEven, ReadDN())));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ FPToFloat(src.Float<double>(i), FPTieEven, ReadDN()));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ SimVRegister tmp;
+ LogicVRegister srctmp = mov(kFormat2D, tmp, src);
+ dst.ClearForWrite(vform);
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ dst.SetFloat(i, FPToFloat(srctmp.Float<double>(i), FPRoundOdd, ReadDN()));
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kSRegSize);
+ int lane_count = LaneCountFromFormat(vform) / 2;
+ for (int i = lane_count - 1; i >= 0; i--) {
+ dst.SetFloat(i + lane_count,
+ FPToFloat(src.Float<double>(i), FPRoundOdd, ReadDN()));
+ }
+ return dst;
+}
+
+
+// Based on the reference C function recip_sqrt_estimate from the ARM
+// Architecture Reference Manual (ARM ARM).
+double Simulator::recip_sqrt_estimate(double a) {
+ int q0, q1, s;
+ double r;
+ if (a < 0.5) {
+ q0 = static_cast<int>(a * 512.0);
+ r = 1.0 / sqrt((static_cast<double>(q0) + 0.5) / 512.0);
+ } else {
+ q1 = static_cast<int>(a * 256.0);
+ r = 1.0 / sqrt((static_cast<double>(q1) + 0.5) / 256.0);
+ }
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
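+
+// Worked example (assumed, not part of the upstream VIXL sources): for
+// a = 0.5 the else branch gives q1 = (int)(0.5 * 256.0) = 128, so
+// r = 1.0 / sqrt(128.5 / 256.0) ~= 1.4115 and s = (int)(256.0 * r + 0.5) = 361,
+// giving an estimate of 361 / 256.0 ~= 1.4102 against the exact
+// 1 / sqrt(0.5) ~= 1.4142.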
+
+
+static inline uint64_t Bits(uint64_t val, int start_bit, int end_bit) {
+ return ExtractUnsignedBitfield64(start_bit, end_bit, val);
+}
+
+
+template <typename T>
+T Simulator::FPRecipSqrtEstimate(T op) {
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (op == 0.0) {
+ if (copysign(1.0, op) < 0.0) {
+ return kFP64NegativeInfinity;
+ } else {
+ return kFP64PositiveInfinity;
+ }
+ } else if (copysign(1.0, op) < 0.0) {
+ FPProcessException();
+ return FPDefaultNaN<T>();
+ } else if (std::isinf(op)) {
+ return 0.0;
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ exp = FloatExp(op);
+ fraction = FloatMantissa(op);
+ fraction <<= 29;
+ } else {
+ exp = DoubleExp(op);
+ fraction = DoubleMantissa(op);
+ }
+
+ if (exp == 0) {
+ while (Bits(fraction, 51, 51) == 0) {
+ fraction = Bits(fraction, 50, 0) << 1;
+ exp -= 1;
+ }
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+
+ double scaled;
+ if (Bits(exp, 0, 0) == 0) {
+ scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
+ } else {
+ scaled = DoublePack(0, 1021, Bits(fraction, 51, 44) << 44);
+ }
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (380 - exp) / 2;
+ } else {
+ result_exp = (3068 - exp) / 2;
+ }
+
+ uint64_t estimate = DoubleToRawbits(recip_sqrt_estimate(scaled));
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t est_bits = static_cast<uint32_t>(Bits(estimate, 51, 29));
+ return FloatPack(0, exp_bits, est_bits);
+ } else {
+ return DoublePack(0, Bits(result_exp, 10, 0), Bits(estimate, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<float>(input));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipSqrtEstimate<double>(input));
+ }
+ }
+ return dst;
+}
+
+template <typename T>
+T Simulator::FPRecipEstimate(T op, FPRounding rounding) {
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ } else {
+ sign = DoubleSign(op);
+ }
+
+ if (std::isnan(op)) {
+ return FPProcessNaN(op);
+ } else if (std::isinf(op)) {
+ return (sign == 1) ? -0.0 : 0.0;
+ } else if (op == 0.0) {
+ FPProcessException(); // FPExc_DivideByZero exception.
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else if (((sizeof(T) == sizeof(float)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -128.0))) ||
+ ((sizeof(T) == sizeof(double)) && // NOLINT(runtime/sizeof)
+ (std::fabs(op) < std::pow(2.0, -1024.0)))) {
+ bool overflow_to_inf = false;
+ switch (rounding) {
+ case FPTieEven: overflow_to_inf = true; break;
+ case FPPositiveInfinity: overflow_to_inf = (sign == 0); break;
+ case FPNegativeInfinity: overflow_to_inf = (sign == 1); break;
+ case FPZero: overflow_to_inf = false; break;
+ default: break;
+ }
+ FPProcessException(); // FPExc_Overflow and FPExc_Inexact.
+ if (overflow_to_inf) {
+ return (sign == 1) ? kFP64NegativeInfinity : kFP64PositiveInfinity;
+ } else {
+ // Return FPMaxNormal(sign).
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ return FloatPack(sign, 0xfe, 0x07fffff);
+ } else {
+ return DoublePack(sign, 0x7fe, 0x0fffffffffffffl);
+ }
+ }
+ } else {
+ uint64_t fraction;
+ int exp, result_exp;
+ uint32_t sign;
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ exp = FloatExp(op);
+ fraction = FloatMantissa(op);
+ fraction <<= 29;
+ } else {
+ sign = DoubleSign(op);
+ exp = DoubleExp(op);
+ fraction = DoubleMantissa(op);
+ }
+
+ if (exp == 0) {
+ if (Bits(fraction, 51, 51) == 0) {
+ exp -= 1;
+ fraction = Bits(fraction, 49, 0) << 2;
+ } else {
+ fraction = Bits(fraction, 50, 0) << 1;
+ }
+ }
+
+ double scaled = DoublePack(0, 1022, Bits(fraction, 51, 44) << 44);
+
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ result_exp = (253 - exp); // In range 253-254 = -1 to 253+1 = 254.
+ } else {
+ result_exp = (2045 - exp); // In range 2045-2046 = -1 to 2045+1 = 2046.
+ }
+
+ double estimate = recip_estimate(scaled);
+
+ fraction = DoubleMantissa(estimate);
+ if (result_exp == 0) {
+ fraction = (UINT64_C(1) << 51) | Bits(fraction, 51, 1);
+ } else if (result_exp == -1) {
+ fraction = (UINT64_C(1) << 50) | Bits(fraction, 51, 2);
+ result_exp = 0;
+ }
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ uint32_t exp_bits = static_cast<uint32_t>(Bits(result_exp, 7, 0));
+ uint32_t frac_bits = static_cast<uint32_t>(Bits(fraction, 51, 29));
+ return FloatPack(sign, exp_bits, frac_bits);
+ } else {
+ return DoublePack(sign, Bits(result_exp, 10, 0), Bits(fraction, 51, 0));
+ }
+ }
+}
+
+
+LogicVRegister Simulator::frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding round) {
+ dst.ClearForWrite(vform);
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ float input = src.Float<float>(i);
+ dst.SetFloat(i, FPRecipEstimate<float>(input, round));
+ }
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ double input = src.Float<double>(i);
+ dst.SetFloat(i, FPRecipEstimate<double>(input, round));
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x3FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_sqrt_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+
+// Based on the reference C function recip_estimate from the ARM Architecture
+// Reference Manual (ARM ARM).
+double Simulator::recip_estimate(double a) {
+ int q, s;
+ double r;
+ q = static_cast<int>(a * 512.0);
+ r = 1.0 / ((static_cast<double>(q) + 0.5) / 512.0);
+ s = static_cast<int>(256.0 * r + 0.5);
+ return static_cast<double>(s) / 256.0;
+}
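+
+// Worked example (assumed, not from the upstream VIXL sources): for a = 0.5,
+// q = (int)(0.5 * 512.0) = 256 and r = 1.0 / (256.5 / 512.0) ~= 1.9961, so
+// s = 511 and the returned estimate is 511 / 256.0 ~= 1.9961 against the
+// exact 1.0 / 0.5 = 2.0.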
+
+
+LogicVRegister Simulator::urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ uint64_t operand;
+ uint32_t result;
+ double dp_operand, dp_result;
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ operand = src.Uint(vform, i);
+ if (operand <= 0x7FFFFFFF) {
+ result = 0xFFFFFFFF;
+ } else {
+ dp_operand = operand * std::pow(2.0, -32);
+ dp_result = recip_estimate(dp_operand) * std::pow(2.0, 31);
+ result = static_cast<uint32_t>(dp_result);
+ }
+ dst.SetUint(vform, i, result);
+ }
+ return dst;
+}
+
+template <typename T>
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ dst.ClearForWrite(vform);
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ T op = src.Float<T>(i);
+ T result;
+ if (std::isnan(op)) {
+ result = FPProcessNaN(op);
+ } else {
+ int exp;
+ uint32_t sign;
+ if (sizeof(T) == sizeof(float)) { // NOLINT(runtime/sizeof)
+ sign = FloatSign(op);
+ exp = FloatExp(op);
+ exp = (exp == 0) ? (0xFF - 1) : static_cast<int>(Bits(~exp, 7, 0));
+ result = FloatPack(sign, exp, 0);
+ } else {
+ sign = DoubleSign(op);
+ exp = DoubleExp(op);
+ exp = (exp == 0) ? (0x7FF - 1) : static_cast<int>(Bits(~exp, 10, 0));
+ result = DoublePack(sign, exp, 0);
+ }
+ }
+ dst.SetFloat(i, result);
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ frecpx<float>(vform, dst, src);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ frecpx<double>(vform, dst, src);
+ }
+ return dst;
+}
+
+LogicVRegister Simulator::scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = FixedToFloat(src.Int(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = FixedToDouble(src.Int(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+LogicVRegister Simulator::ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding round) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ if (LaneSizeInBitsFromFormat(vform) == kSRegSize) {
+ float result = UFixedToFloat(src.Uint(kFormatS, i), fbits, round);
+ dst.SetFloat<float>(i, result);
+ } else {
+ VIXL_ASSERT(LaneSizeInBitsFromFormat(vform) == kDRegSize);
+ double result = UFixedToDouble(src.Uint(kFormatD, i), fbits, round);
+ dst.SetFloat<double>(i, result);
+ }
+ }
+ return dst;
+}
+
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
new file mode 100644
index 0000000000..5c4a5ce145
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.cpp
@@ -0,0 +1,2027 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/MacroAssembler-vixl.h"
+
+#include <ctype.h>
+
+namespace vixl {
+
+MacroAssembler::MacroAssembler()
+ : js::jit::Assembler(),
+ sp_(x28),
+ tmp_list_(ip0, ip1),
+ fptmp_list_(d31)
+{
+}
+
+
+void MacroAssembler::FinalizeCode() {
+ Assembler::FinalizeCode();
+}
+
+
+int MacroAssembler::MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm) {
+ bool emit_code = (masm != NULL);
+ VIXL_ASSERT(IsUint32(imm) || IsInt32(imm) || rd.Is64Bits());
+ // The worst case for size is mov 64-bit immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(masm);
+
+  // Immediates on AArch64 can be produced using an initial value and zero to
+  // three move-keep operations.
+ //
+ // Initial values can be generated with:
+ // 1. 64-bit move zero (movz).
+ // 2. 32-bit move inverted (movn).
+ // 3. 64-bit move inverted.
+ // 4. 32-bit orr immediate.
+ // 5. 64-bit orr immediate.
+ // Move-keep may then be used to modify each of the 16-bit half words.
+ //
+  // The code below supports all five initial value generators, and the
+  // application of move-keep operations to move-zero and move-inverted
+  // initial values.
+
+ // Try to move the immediate in one instruction, and if that fails, switch to
+ // using multiple instructions.
+ if (OneInstrMoveImmediateHelper(masm, rd, imm)) {
+ return 1;
+ } else {
+ int instruction_count = 0;
+ unsigned reg_size = rd.size();
+
+ // Generic immediate case. Imm will be represented by
+ // [imm3, imm2, imm1, imm0], where each imm is 16 bits.
+ // A move-zero or move-inverted is generated for the first non-zero or
+ // non-0xffff immX, and a move-keep for subsequent non-zero immX.
+
+ uint64_t ignored_halfword = 0;
+ bool invert_move = false;
+ // If the number of 0xffff halfwords is greater than the number of 0x0000
+ // halfwords, it's more efficient to use move-inverted.
+ if (CountClearHalfWords(~imm, reg_size) >
+ CountClearHalfWords(imm, reg_size)) {
+ ignored_halfword = 0xffff;
+ invert_move = true;
+ }
+
+ // Mov instructions can't move values into the stack pointer, so set up a
+ // temporary register, if needed.
+ UseScratchRegisterScope temps;
+ Register temp;
+ if (emit_code) {
+ temps.Open(masm);
+ temp = rd.IsSP() ? temps.AcquireSameSizeAs(rd) : rd;
+ }
+
+ // Iterate through the halfwords. Use movn/movz for the first non-ignored
+ // halfword, and movk for subsequent halfwords.
+ VIXL_ASSERT((reg_size % 16) == 0);
+ bool first_mov_done = false;
+ for (unsigned i = 0; i < (temp.size() / 16); i++) {
+ uint64_t imm16 = (imm >> (16 * i)) & 0xffff;
+ if (imm16 != ignored_halfword) {
+ if (!first_mov_done) {
+ if (invert_move) {
+ if (emit_code) masm->movn(temp, ~imm16 & 0xffff, 16 * i);
+ instruction_count++;
+ } else {
+ if (emit_code) masm->movz(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ first_mov_done = true;
+ } else {
+ // Construct a wider constant.
+ if (emit_code) masm->movk(temp, imm16, 16 * i);
+ instruction_count++;
+ }
+ }
+ }
+
+ VIXL_ASSERT(first_mov_done);
+
+ // Move the temporary if the original destination register was the stack
+ // pointer.
+ if (rd.IsSP()) {
+ if (emit_code) masm->mov(rd, temp);
+ instruction_count++;
+ }
+ return instruction_count;
+ }
+}
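+
+// Illustrative example (assumed, not part of the upstream VIXL sources):
+// moving 0x0000cafe00001234 into x0 cannot be done in one instruction, so the
+// helper above skips the two 0x0000 halfwords and emits
+//   movz x0, #0x1234             // first non-ignored halfword
+//   movk x0, #0xcafe, lsl #32    // move-keep for the remaining halfword
+// returning an instruction count of 2.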
+
+
+bool MacroAssembler::OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm) {
+ bool emit_code = masm != NULL;
+ unsigned n, imm_s, imm_r;
+ int reg_size = dst.size();
+
+ if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move zero instruction. Movz can't write
+ // to the stack pointer.
+ if (emit_code) {
+ masm->movz(dst, imm);
+ }
+ return true;
+ } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+ // Immediate can be represented in a move negative instruction. Movn can't
+ // write to the stack pointer.
+ if (emit_code) {
+ masm->movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+ }
+ return true;
+ } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be represented in a logical orr instruction.
+ VIXL_ASSERT(!dst.IsZero());
+ if (emit_code) {
+ masm->LogicalImmediate(
+ dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+ }
+ return true;
+ }
+ return false;
+}
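+
+// Illustrative examples (assumed, not upstream code) of the three
+// single-instruction cases above, for a 64-bit destination:
+//   0x0000000000002a00 -> movz (only one halfword is non-zero)
+//   0xffffffffffff1234 -> movn (all other halfwords are 0xffff)
+//   0x5555555555555555 -> orr  (a valid bitmask-immediate encoding)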
+
+
+void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
+ VIXL_ASSERT((reg.Is(NoReg) || (type >= kBranchTypeFirstUsingReg)) &&
+ ((bit == -1) || (type >= kBranchTypeFirstUsingBit)));
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ B(static_cast<Condition>(type), label);
+ } else {
+ switch (type) {
+ case always: B(label); break;
+ case never: break;
+ case reg_zero: Cbz(reg, label); break;
+ case reg_not_zero: Cbnz(reg, label); break;
+ case reg_bit_clear: Tbz(reg, bit, label); break;
+ case reg_bit_set: Tbnz(reg, bit, label); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+}
+
+
+void MacroAssembler::B(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ b(label);
+}
+
+
+void MacroAssembler::B(Label* label, Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ b(&done, InvertCondition(cond));
+ b(label);
+ bind(&done);
+ } else {
+ b(label, cond);
+ }
+}
+
+
+void MacroAssembler::Cbnz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+ cbnz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Cbz(const Register& rt, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, CondBranchType)) {
+ Label done;
+ cbnz(rt, &done);
+ b(label);
+ bind(&done);
+ } else {
+ cbz(rt, label);
+ }
+}
+
+
+void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+ tbnz(rt, bit_pos, label);
+ }
+}
+
+
+void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ VIXL_ASSERT(!rt.IsZero());
+ EmissionCheckScope guard(this, 2 * kInstructionSize);
+
+ if (label->bound() && LabelIsOutOfRange(label, TestBranchType)) {
+ Label done;
+ tbnz(rt, bit_pos, &done);
+ b(label);
+ bind(&done);
+ } else {
+ tbz(rt, bit_pos, label);
+ }
+}
+
+
+void MacroAssembler::And(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, AND);
+}
+
+
+void MacroAssembler::Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ANDS);
+}
+
+
+void MacroAssembler::Tst(const Register& rn,
+ const Operand& operand) {
+ Ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BIC);
+}
+
+
+void MacroAssembler::Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, BICS);
+}
+
+
+void MacroAssembler::Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORR);
+}
+
+
+void MacroAssembler::Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, ORN);
+}
+
+
+void MacroAssembler::Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EOR);
+}
+
+
+void MacroAssembler::Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ LogicalMacro(rd, rn, operand, EON);
+}
+
+
+void MacroAssembler::LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op) {
+ // The worst case for size is logical immediate to sp:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to do the operation
+ // * 1 instruction to move to sp
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = ~immediate;
+ }
+
+ // Ignore the top 32 bits of an immediate if we're moving to a W register.
+ if (rd.Is32Bits()) {
+ // Check that the top 32 bits are consistent.
+ VIXL_ASSERT(((immediate >> kWRegSize) == 0) ||
+ ((immediate >> kWRegSize) == -1));
+ immediate &= kWRegMask;
+ }
+
+ VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
+
+ // Special cases for all set or all clear immediates.
+ if (immediate == 0) {
+ switch (op) {
+ case AND:
+ Mov(rd, 0);
+ return;
+ case ORR:
+ VIXL_FALLTHROUGH();
+ case EOR:
+ Mov(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ } else if ((rd.Is64Bits() && (immediate == -1)) ||
+ (rd.Is32Bits() && (immediate == 0xffffffff))) {
+ switch (op) {
+ case AND:
+ Mov(rd, rn);
+ return;
+ case ORR:
+ Mov(rd, immediate);
+ return;
+ case EOR:
+ Mvn(rd, rn);
+ return;
+ case ANDS:
+ VIXL_FALLTHROUGH();
+ case BICS:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // Immediate can't be encoded: synthesize using move immediate.
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // If the left-hand input is the stack pointer, we can't pre-shift the
+ // immediate, as the encoding won't allow the subsequent post shift.
+ PreShiftImmMode mode = rn.IsSP() ? kNoShift : kAnyShift;
+ Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate, mode);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ if (rd.Is(sp)) {
+ // If rd is the stack pointer we cannot use it as the destination
+ // register so we use the temp register as an intermediate again.
+ Logical(temp, rn, imm_operand, op);
+ Mov(sp, temp);
+ } else {
+ Logical(rd, rn, imm_operand, op);
+ }
+ }
+ } else if (operand.IsExtendedRegister()) {
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports shift <= 4. We want to support exactly the
+ // same modes here.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ Logical(rd, rn, Operand(temp), op);
+ } else {
+ // The operand can be encoded in the instruction.
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ Logical(rd, rn, operand, op);
+ }
+}
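+
+// Illustrative example (assumed, not part of the upstream VIXL sources):
+// And(w0, w1, 0xffff0000) fits the bitmask-immediate encoding and emits a
+// single "and w0, w1, #0xffff0000", whereas And(x0, x1, 0x0123456789abcdef)
+// does not; there the constant is first built in a scratch register with
+// movz/movk and a register-register "and" is emitted instead.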
+
+
+void MacroAssembler::Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode) {
+ // The worst case for size is mov immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mov(rd, operand.immediate());
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Emit a shift instruction if moving a shifted register. This operation
+ // could also be achieved using an orr instruction (like orn used by Mvn),
+ // but using a shift instruction makes the disassembly clearer.
+ EmitShift(rd, operand.reg(), operand.shift(), operand.shift_amount());
+ } else if (operand.IsExtendedRegister()) {
+ // Emit an extend instruction if moving an extended register. This handles
+ // extend with post-shift operations, too.
+ EmitExtendShift(rd, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ } else {
+ // Otherwise, emit a register move only if the registers are distinct, or
+ // if they are not X registers.
+ //
+ // Note that mov(w0, w0) is not a no-op because it clears the top word of
+ // x0. A flag is provided (kDiscardForSameWReg) if a move between the same W
+ // registers is not required to clear the top word of the X register. In
+ // this case, the instruction is discarded.
+ //
+    // If sp is an operand, add #0 is emitted; otherwise, orr #0 is used.
+ if (!rd.Is(operand.reg()) || (rd.Is32Bits() &&
+ (discard_mode == kDontDiscardForSameWReg))) {
+ mov(rd, operand.reg());
+ }
+ }
+}
+
+
+void MacroAssembler::Movi16bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(IsUint16(imm));
+ int byte1 = (imm & 0xff);
+ int byte2 = ((imm >> 8) & 0xff);
+ if (byte1 == byte2) {
+ movi(vd.Is64Bits() ? vd.V8B() : vd.V16B(), byte1);
+ } else if (byte1 == 0) {
+ movi(vd, byte2, LSL, 8);
+ } else if (byte2 == 0) {
+ movi(vd, byte1);
+ } else if (byte1 == 0xff) {
+ mvni(vd, ~byte2 & 0xff, LSL, 8);
+ } else if (byte2 == 0xff) {
+ mvni(vd, ~byte1 & 0xff);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ movz(temp, imm);
+ dup(vd, temp);
+ }
+}
+
+
+void MacroAssembler::Movi32bitHelper(const VRegister& vd, uint64_t imm) {
+ VIXL_ASSERT(IsUint32(imm));
+
+ uint8_t bytes[sizeof(imm)];
+ memcpy(bytes, &imm, sizeof(imm));
+
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 4; ++i) {
+ if ((bytes[i] != 0) && (bytes[i] != 0xff)) {
+ all0orff = false;
+ break;
+ }
+ }
+
+ if (all0orff == true) {
+ movi(vd.Is64Bits() ? vd.V1D() : vd.V2D(), ((imm << 32) | imm));
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is non-zero.
+ for (int i = 0; i < 4; i++) {
+ if ((imm & (0xff << (i * 8))) == imm) {
+ movi(vd, bytes[i], LSL, i * 8);
+ return;
+ }
+ }
+
+ // Of the 4 bytes, only one byte is not 0xff.
+ for (int i = 0; i < 4; i++) {
+ uint32_t mask = ~(0xff << (i * 8));
+ if ((imm & mask) == mask) {
+ mvni(vd, ~bytes[i] & 0xff, LSL, i * 8);
+ return;
+ }
+ }
+
+ // Immediate is of the form 0x00MMFFFF.
+ if ((imm & 0xff00ffff) == 0x0000ffff) {
+ movi(vd, bytes[2], MSL, 16);
+ return;
+ }
+
+ // Immediate is of the form 0x0000MMFF.
+ if ((imm & 0xffff00ff) == 0x000000ff) {
+ movi(vd, bytes[1], MSL, 8);
+ return;
+ }
+
+ // Immediate is of the form 0xFFMM0000.
+ if ((imm & 0xff00ffff) == 0xff000000) {
+ mvni(vd, ~bytes[2] & 0xff, MSL, 16);
+ return;
+ }
+ // Immediate is of the form 0xFFFFMM00.
+ if ((imm & 0xffff00ff) == 0xffff0000) {
+ mvni(vd, ~bytes[1] & 0xff, MSL, 8);
+ return;
+ }
+
+ // Top and bottom 16-bits are equal.
+ if (((imm >> 16) & 0xffff) == (imm & 0xffff)) {
+ Movi16bitHelper(vd.Is64Bits() ? vd.V4H() : vd.V8H(), imm & 0xffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireW();
+ Mov(temp, imm);
+ dup(vd, temp);
+ }
+}
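+
+// Illustrative examples (assumed, not upstream code) of the 32-bit paths
+// above:
+//   0x00003400 -> movi vd, #0x34, lsl #8   (only one byte is non-zero)
+//   0xffff34ff -> mvni vd, #0xcb, lsl #8   (only one byte is not 0xff)
+//   0x0034ffff -> movi vd, #0x34, msl #16  (the 0x00MMFFFF form)
+//   0x00ab00ab -> Movi16bitHelper          (both halfwords are equal)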
+
+
+void MacroAssembler::Movi64bitHelper(const VRegister& vd, uint64_t imm) {
+ // All bytes are either 0x00 or 0xff.
+ {
+ bool all0orff = true;
+ for (int i = 0; i < 8; ++i) {
+ int byteval = (imm >> (i * 8)) & 0xff;
+ if (byteval != 0 && byteval != 0xff) {
+ all0orff = false;
+ break;
+ }
+ }
+ if (all0orff == true) {
+ movi(vd, imm);
+ return;
+ }
+ }
+
+ // Top and bottom 32-bits are equal.
+ if (((imm >> 32) & 0xffffffff) == (imm & 0xffffffff)) {
+ Movi32bitHelper(vd.Is64Bits() ? vd.V2S() : vd.V4S(), imm & 0xffffffff);
+ return;
+ }
+
+ // Default case.
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireX();
+ Mov(temp, imm);
+ if (vd.Is1D()) {
+ mov(vd.D(), 0, temp);
+ } else {
+ dup(vd.V2D(), temp);
+ }
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift,
+ int shift_amount) {
+ MacroEmissionCheckScope guard(this);
+ if (shift_amount != 0 || shift != LSL) {
+ movi(vd, imm, shift, shift_amount);
+ } else if (vd.Is8B() || vd.Is16B()) {
+ // 8-bit immediate.
+ VIXL_ASSERT(IsUint8(imm));
+ movi(vd, imm);
+ } else if (vd.Is4H() || vd.Is8H()) {
+ // 16-bit immediate.
+ Movi16bitHelper(vd, imm);
+ } else if (vd.Is2S() || vd.Is4S()) {
+ // 32-bit immediate.
+ Movi32bitHelper(vd, imm);
+ } else {
+ // 64-bit immediate.
+ Movi64bitHelper(vd, imm);
+ }
+}
+
+
+void MacroAssembler::Movi(const VRegister& vd,
+ uint64_t hi,
+ uint64_t lo) {
+ VIXL_ASSERT(vd.Is128Bits());
+ UseScratchRegisterScope temps(this);
+
+ // When hi == lo, the following generates good code.
+ //
+ // In situations where the constants are complex and hi != lo, the following
+ // can turn into up to 10 instructions: 2*(mov + 3*movk + dup/insert). To do
+ // any better, we could try to estimate whether splatting the high value and
+ // updating the low value would generate fewer instructions than vice versa
+ // (what we do now).
+ //
+ // (A PC-relative load from memory to the vector register (ADR + LD2) is going
+ // to have fairly high latency but is fairly compact; not clear what the best
+ // tradeoff is.)
+
+ Movi(vd.V2D(), lo);
+ if (hi != lo) {
+ Register temp = temps.AcquireX();
+ Mov(temp, hi);
+ Ins(vd.V2D(), 1, temp);
+ }
+}
+
+
+void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
+ // The worst case for size is mvn immediate with up to 4 instructions.
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Call the macro assembler for generic immediates.
+ Mvn(rd, operand.immediate());
+ } else if (operand.IsExtendedRegister()) {
+ UseScratchRegisterScope temps(this);
+ temps.Exclude(operand.reg());
+
+ // Emit two instructions for the extend case. This differs from Mov, as
+ // the extend and invert can't be achieved in one instruction.
+ Register temp = temps.AcquireSameSizeAs(rd);
+
+ // VIXL can acquire temp registers. Assert that the caller is aware.
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(operand.maybeReg()));
+
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ mvn(rd, Operand(temp));
+ } else {
+ // Otherwise, register and shifted register cases can be handled by the
+ // assembler directly, using orn.
+ mvn(rd, operand);
+ }
+}
+
+
+void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
+ MoveImmediateHelper(this, rd, imm);
+}
+
+
+void MacroAssembler::Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMN);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMP);
+ }
+}
+
+
+void MacroAssembler::Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond) {
+ if (operand.IsImmediate() && (operand.immediate() < 0)) {
+ ConditionalCompareMacro(rn, -operand.immediate(), nzcv, cond, CCMP);
+ } else {
+ ConditionalCompareMacro(rn, operand, nzcv, cond, CCMN);
+ }
+}
+
+
+void MacroAssembler::ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is ccmp immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for ccmp
+ MacroEmissionCheckScope guard(this);
+
+ if ((operand.IsShiftedRegister() && (operand.shift_amount() == 0)) ||
+ (operand.IsImmediate() && IsImmConditionalCompare(operand.immediate()))) {
+ // The immediate can be encoded in the instruction, or the operand is an
+ // unshifted register: call the assembler.
+ ConditionalCompare(rn, operand, nzcv, cond, op);
+ } else {
+ UseScratchRegisterScope temps(this);
+ // The operand isn't directly supported by the instruction: perform the
+ // operation on a temporary register.
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ ConditionalCompare(rn, temp, nzcv, cond, op);
+ }
+}
+
+
+void MacroAssembler::Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ // The worst case for size is csel immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for csel
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsImmediate()) {
+ // Immediate argument. Handle special cases of 0, 1 and -1 using zero
+ // register.
+ int64_t imm = operand.immediate();
+ Register zr = AppropriateZeroRegFor(rn);
+ if (imm == 0) {
+ csel(rd, rn, zr, cond);
+ } else if (imm == 1) {
+ csinc(rd, rn, zr, cond);
+ } else if (imm == -1) {
+ csinv(rd, rn, zr, cond);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand.immediate());
+ csel(rd, rn, temp, cond);
+ }
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() == 0)) {
+ // Unshifted register argument.
+ csel(rd, rn, operand.reg(), cond);
+ } else {
+ // All other arguments.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn));
+ VIXL_ASSERT(!temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ csel(rd, rn, temp, cond);
+ }
+}
+
+
+void MacroAssembler::Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, SUB);
+ } else {
+ AddSubMacro(rd, rn, operand, S, ADD);
+ }
+}
+
+
+void MacroAssembler::Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Add(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S) {
+ if (operand.IsImmediate() && (operand.immediate() < 0) &&
+ IsImmAddSub(-operand.immediate())) {
+ AddSubMacro(rd, rn, -operand.immediate(), S, ADD);
+ } else {
+ AddSubMacro(rd, rn, operand, S, SUB);
+ }
+}
+
+
+void MacroAssembler::Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ Sub(rd, rn, operand, SetFlags);
+}
+
+
+void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
+ Adds(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
+ Subs(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void MacroAssembler::Fcmp(const FPRegister& fn, double value,
+ FPTrapFlags trap) {
+  // The worst case for size is:
+  //  * 1 instruction to materialise the constant, using the literal pool if
+  //    necessary
+  //  * 1 instruction for fcmp{e}
+ MacroEmissionCheckScope guard(this);
+ if (value != 0.0) {
+ UseScratchRegisterScope temps(this);
+ FPRegister tmp = temps.AcquireSameSizeAs(fn);
+ VIXL_ASSERT(!tmp.Is(fn));
+ Fmov(tmp, value);
+ FPCompareMacro(fn, tmp, trap);
+ } else {
+ FPCompareMacro(fn, value, trap);
+ }
+}
+
+
+void MacroAssembler::Fcmpe(const FPRegister& fn, double value) {
+ Fcmp(fn, value, EnableTrap);
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, double imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1S() || vd.Is2S() || vd.Is4S()) {
+ Fmov(vd, static_cast<float>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1D() || vd.Is2D());
+ if (IsImmFP64(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint64_t rawbits = DoubleToRawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, xzr);
+ } else {
+ Assembler::fImmPool64(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+void MacroAssembler::Fmov(VRegister vd, float imm) {
+ // Floating point immediates are loaded through the literal pool.
+ MacroEmissionCheckScope guard(this);
+
+ if (vd.Is1D() || vd.Is2D()) {
+ Fmov(vd, static_cast<double>(imm));
+ return;
+ }
+
+ VIXL_ASSERT(vd.Is1S() || vd.Is2S() || vd.Is4S());
+ if (IsImmFP32(imm)) {
+ fmov(vd, imm);
+ } else {
+ uint32_t rawbits = FloatToRawbits(imm);
+ if (vd.IsScalar()) {
+ if (rawbits == 0) {
+ fmov(vd, wzr);
+ } else {
+ Assembler::fImmPool32(vd, imm);
+ }
+ } else {
+ // TODO: consider NEON support for load literal.
+ Movi(vd, rawbits);
+ }
+ }
+}
+
+
+
+void MacroAssembler::Neg(const Register& rd,
+ const Operand& operand) {
+ if (operand.IsImmediate()) {
+ Mov(rd, -operand.immediate());
+ } else {
+ Sub(rd, AppropriateZeroRegFor(rd), operand);
+ }
+}
+
+
+void MacroAssembler::Negs(const Register& rd,
+ const Operand& operand) {
+ Subs(rd, AppropriateZeroRegFor(rd), operand);
+}
+
+
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+ int64_t imm) {
+ return OneInstrMoveImmediateHelper(this, dst, imm);
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm,
+ PreShiftImmMode mode) {
+ int reg_size = dst.size();
+
+ // Encode the immediate in a single move instruction, if possible.
+ if (TryOneInstrMoveImmediate(dst, imm)) {
+ // The move was successful; nothing to do here.
+ } else {
+ // Pre-shift the immediate to the least-significant bits of the register.
+ int shift_low = CountTrailingZeros(imm, reg_size);
+ if (mode == kLimitShiftForSP) {
+ // When applied to the stack pointer, the subsequent arithmetic operation
+ // can use the extend form to shift left by a maximum of four bits. Right
+ // shifts are not allowed, so we filter them out later before the new
+ // immediate is tested.
+ shift_low = std::min(shift_low, 4);
+ }
+
+ int64_t imm_low = imm >> shift_low;
+
+ // Pre-shift the immediate to the most-significant bits of the register,
+ // inserting set bits in the least-significant bits.
+ int shift_high = CountLeadingZeros(imm, reg_size);
+ int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+ if ((mode != kNoShift) && TryOneInstrMoveImmediate(dst, imm_low)) {
+ // The new immediate has been moved into the destination's low bits:
+ // return a new leftward-shifting operand.
+ return Operand(dst, LSL, shift_low);
+ } else if ((mode == kAnyShift) && TryOneInstrMoveImmediate(dst, imm_high)) {
+ // The new immediate has been moved into the destination's high bits:
+ // return a new rightward-shifting operand.
+ return Operand(dst, LSR, shift_high);
+ } else {
+ Mov(dst, imm);
+ }
+ }
+ return Operand(dst);
+}
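+
+// Illustrative example (assumed, not part of the upstream VIXL sources): with
+// imm = 0x000abcd0 and kAnyShift, the value itself cannot be moved in one
+// instruction, but imm >> 4 = 0xabcd can, so the helper emits
+// "movz dst, #0xabcd" and returns Operand(dst, LSL, 4) for the caller to fold
+// into its shifted-operand form.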
+
+
+void MacroAssembler::ComputeAddress(const Register& dst,
+ const MemOperand& mem_op) {
+ // We cannot handle pre-indexing or post-indexing.
+ VIXL_ASSERT(mem_op.addrmode() == Offset);
+ Register base = mem_op.base();
+ if (mem_op.IsImmediateOffset()) {
+ Add(dst, base, mem_op.offset());
+ } else {
+ VIXL_ASSERT(mem_op.IsRegisterOffset());
+ Register reg_offset = mem_op.regoffset();
+ Shift shift = mem_op.shift();
+ Extend extend = mem_op.extend();
+ if (shift == NO_SHIFT) {
+ VIXL_ASSERT(extend != NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, extend, mem_op.shift_amount()));
+ } else {
+ VIXL_ASSERT(extend == NO_EXTEND);
+ Add(dst, base, Operand(reg_offset, shift, mem_op.shift_amount()));
+ }
+ }
+}
+
+
+void MacroAssembler::AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op) {
+ // Worst case is add/sub immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+
+ if (operand.IsZero() && rd.Is(rn) && rd.Is64Bits() && rn.Is64Bits() &&
+ (S == LeaveFlags)) {
+ // The instruction would be a nop. Avoid generating useless code.
+ return;
+ }
+
+ if ((operand.IsImmediate() && !IsImmAddSub(operand.immediate())) ||
+ (rn.IsZero() && !operand.IsShiftedRegister()) ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(rn);
+ if (operand.IsImmediate()) {
+ PreShiftImmMode mode = kAnyShift;
+
+ // If the destination or source register is the stack pointer, we can
+ // only pre-shift the immediate right by values supported in the add/sub
+ // extend encoding.
+ if (rd.IsSP()) {
+ // If the destination is SP and flags will be set, we can't pre-shift
+ // the immediate at all.
+ mode = (S == SetFlags) ? kNoShift : kLimitShiftForSP;
+ } else if (rn.IsSP()) {
+ mode = kLimitShiftForSP;
+ }
+
+ Operand imm_operand =
+ MoveImmediateForShiftedOp(temp, operand.immediate(), mode);
+ AddSub(rd, rn, imm_operand, S, op);
+ } else {
+ Mov(temp, operand);
+ AddSub(rd, rn, temp, S, op);
+ }
+ } else {
+ AddSub(rd, rn, operand, S, op);
+ }
+}
+
+
+void MacroAssembler::Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
+}
+
+
+void MacroAssembler::Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
+}
+
+
+void MacroAssembler::Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
+}
+
+
+void MacroAssembler::Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand) {
+ AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
+}
+
+
+void MacroAssembler::Ngc(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbc(rd, zr, operand);
+}
+
+
+void MacroAssembler::Ngcs(const Register& rd,
+ const Operand& operand) {
+ Register zr = AppropriateZeroRegFor(rd);
+ Sbcs(rd, zr, operand);
+}
+
+
+void MacroAssembler::AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op) {
+ VIXL_ASSERT(rd.size() == rn.size());
+ // Worst case is addc/subc immediate:
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction for add/sub
+ MacroEmissionCheckScope guard(this);
+ UseScratchRegisterScope temps(this);
+
+ if (operand.IsImmediate() ||
+ (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
+    // Add/sub with carry (immediate or ROR shifted register).
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ Mov(temp, operand);
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
+ // Add/sub with carry (shifted register).
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ VIXL_ASSERT(operand.shift() != ROR);
+ VIXL_ASSERT(IsUintN(rd.size() == kXRegSize ? kXRegSizeLog2 : kWRegSizeLog2,
+ operand.shift_amount()));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitShift(temp, operand.reg(), operand.shift(), operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else if (operand.IsExtendedRegister()) {
+ // Add/sub with carry (extended register).
+ VIXL_ASSERT(operand.reg().size() <= rd.size());
+ // Add/sub extended supports a shift <= 4. We want to support exactly the
+ // same modes.
+ VIXL_ASSERT(operand.shift_amount() <= 4);
+ VIXL_ASSERT(operand.reg().Is64Bits() ||
+ ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
+ temps.Exclude(operand.reg());
+ Register temp = temps.AcquireSameSizeAs(rn);
+ VIXL_ASSERT(!temp.Is(rd) && !temp.Is(rn) && !temp.Is(operand.maybeReg()));
+ EmitExtendShift(temp, operand.reg(), operand.extend(),
+ operand.shift_amount());
+ AddSubWithCarry(rd, rn, Operand(temp), S, op);
+ } else {
+ // The addressing mode is directly supported by the instruction.
+ AddSubWithCarry(rd, rn, operand, S, op);
+ }
+}
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, OP) \
+void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) { \
+ LoadStoreMacro(REG, addr, OP); \
+}
+LS_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+
+void MacroAssembler::LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op) {
+ // Worst case is ldr/str pre/post index:
+ // * 1 instruction for ldr/str
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSDataSize(op);
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(offset, access_size) &&
+ !IsImmLSUnscaled(offset)) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ VIXL_ASSERT(!temp.Is(rt));
+ VIXL_ASSERT(!temp.Is(addr.base()) && !temp.Is(addr.regoffset()));
+ Mov(temp, addr.offset());
+ LoadStore(rt, MemOperand(addr.base(), temp), op);
+ } else if (addr.IsPostIndex() && !IsImmLSUnscaled(offset)) {
+ // Post-index beyond unscaled addressing range.
+ LoadStore(rt, MemOperand(addr.base()), op);
+ Add(addr.base(), addr.base(), Operand(offset));
+ } else if (addr.IsPreIndex() && !IsImmLSUnscaled(offset)) {
+ // Pre-index beyond unscaled addressing range.
+ Add(addr.base(), addr.base(), Operand(offset));
+ LoadStore(rt, MemOperand(addr.base()), op);
+ } else {
+ // Encodable in one load/store instruction.
+ LoadStore(rt, addr, op);
+ }
+}
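+
+// Illustrative example (assumed, not part of the upstream VIXL sources):
+// Ldr(x0, MemOperand(x1, 0x123457)) has an offset that fits neither the
+// scaled-unsigned nor the unscaled immediate field, so the macro emits
+//   mov <scratch>, #0x123457    // materialised with movz/movk as needed
+//   ldr x0, [x1, <scratch>]     // register-offset addressing
+// where <scratch> is acquired from the scratch register pool.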
+
+
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+void MacroAssembler::FN(const REGTYPE REG, \
+ const REGTYPE REG2, \
+ const MemOperand& addr) { \
+ LoadStorePairMacro(REG, REG2, addr, OP); \
+}
+LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
+
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op) {
+ // TODO(all): Should we support register offset for load-store-pair?
+ VIXL_ASSERT(!addr.IsRegisterOffset());
+ // Worst case is ldp/stp immediate:
+ // * 1 instruction for ldp/stp
+ // * up to 4 instructions to materialise the constant
+ // * 1 instruction to update the base
+ MacroEmissionCheckScope guard(this);
+
+ int64_t offset = addr.offset();
+ unsigned access_size = CalcLSPairDataSize(op);
+
+ // Check if the offset fits in the immediate field of the appropriate
+ // instruction. If not, emit two instructions to perform the operation.
+ if (IsImmLSPair(offset, access_size)) {
+ // Encodable in one load/store pair instruction.
+ LoadStorePair(rt, rt2, addr, op);
+ } else {
+ Register base = addr.base();
+ if (addr.IsImmediateOffset()) {
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(base);
+ Add(temp, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(temp), op);
+ } else if (addr.IsPostIndex()) {
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ Add(base, base, offset);
+ } else {
+ VIXL_ASSERT(addr.IsPreIndex());
+ Add(base, base, offset);
+ LoadStorePair(rt, rt2, MemOperand(base), op);
+ }
+ }
+}
+
+
+void MacroAssembler::Prfm(PrefetchOperation op, const MemOperand& addr) {
+ MacroEmissionCheckScope guard(this);
+
+ // There are no pre- or post-index modes for prfm.
+ VIXL_ASSERT(addr.IsImmediateOffset() || addr.IsRegisterOffset());
+
+ // The access size is implicitly 8 bytes for all prefetch operations.
+ unsigned size = kXRegSizeInBytesLog2;
+
+ // Check if an immediate offset fits in the immediate field of the
+ // appropriate instruction. If not, emit two instructions to perform
+ // the operation.
+ if (addr.IsImmediateOffset() && !IsImmLSScaled(addr.offset(), size) &&
+ !IsImmLSUnscaled(addr.offset())) {
+ // Immediate offset that can't be encoded using unsigned or unscaled
+ // addressing modes.
+ UseScratchRegisterScope temps(this);
+ Register temp = temps.AcquireSameSizeAs(addr.base());
+ Mov(temp, addr.offset());
+ Prefetch(op, MemOperand(addr.base(), temp));
+ } else {
+ // Simple register-offsets are encodable in one instruction.
+ Prefetch(op, addr);
+ }
+}
+
+
+void MacroAssembler::PushStackPointer() {
+ PrepareForPush(1, 8);
+
+  // Pushing the stack pointer itself leads to implementation-defined
+  // behavior, which may be surprising. In particular,
+  //   str x28, [x28, #-8]!
+  // pre-decrements the stack pointer, storing the decremented value.
+  // Additionally, sp reads as xzr in this context, so it cannot be pushed
+  // directly; we must go through a scratch register instead.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireX();
+
+ Mov(scratch, GetStackPointer64());
+ str(scratch, MemOperand(GetStackPointer64(), -8, PreIndex));
+}
+
+
+void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3) {
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(src0.IsValid());
+
+ int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
+ int size = src0.SizeInBytes();
+
+ if (src0.Is(GetStackPointer64())) {
+ VIXL_ASSERT(count == 1);
+ VIXL_ASSERT(size == 8);
+ PushStackPointer();
+ return;
+ }
+
+ PrepareForPush(count, size);
+ PushHelper(count, size, src0, src1, src2, src3);
+}
+
+
+void MacroAssembler::Pop(const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3) {
+ // It is not valid to pop into the same register more than once in one
+ // instruction, not even into the zero register.
+ VIXL_ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(dst0.IsValid());
+
+ int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
+ int size = dst0.SizeInBytes();
+
+ PrepareForPop(count, size);
+ PopHelper(count, size, dst0, dst1, dst2, dst3);
+}
+
+
+void MacroAssembler::PushCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPush(registers.Count(), reg_size);
+
+ // Bump the stack pointer and store two registers at the bottom.
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Stp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), -size, PreIndex));
+ } else if (bottom_0.IsValid()) {
+ Str(bottom_0, MemOperand(GetStackPointer64(), -size, PreIndex));
+ }
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& src0 = registers.PopLowestIndex();
+ const CPURegister& src1 = registers.PopLowestIndex();
+ if (src1.IsValid()) {
+ Stp(src0, src1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Str(src0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+}
+
+
+void MacroAssembler::PopCPURegList(CPURegList registers) {
+ VIXL_ASSERT(!registers.Overlaps(*TmpList()));
+ VIXL_ASSERT(!registers.Overlaps(*FPTmpList()));
+
+ int reg_size = registers.RegisterSizeInBytes();
+ PrepareForPop(registers.Count(), reg_size);
+
+ int size = registers.TotalSizeInBytes();
+ const CPURegister& bottom_0 = registers.PopLowestIndex();
+ const CPURegister& bottom_1 = registers.PopLowestIndex();
+
+ int offset = 2 * reg_size;
+ while (!registers.IsEmpty()) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (dst1.IsValid()) {
+ Ldp(dst0, dst1, MemOperand(GetStackPointer64(), offset));
+ } else {
+ Ldr(dst0, MemOperand(GetStackPointer64(), offset));
+ }
+ offset += 2 * reg_size;
+ }
+
+ // Load the two registers at the bottom and drop the stack pointer.
+ if (bottom_0.IsValid() && bottom_1.IsValid()) {
+ Ldp(bottom_0, bottom_1, MemOperand(GetStackPointer64(), size, PostIndex));
+ } else if (bottom_0.IsValid()) {
+ Ldr(bottom_0, MemOperand(GetStackPointer64(), size, PostIndex));
+ }
+}
+
+
+void MacroAssembler::PushMultipleTimes(int count, Register src) {
+ int size = src.SizeInBytes();
+
+ PrepareForPush(count, size);
+  // Push up to four registers at a time if possible: if the current stack
+  // pointer is sp and the register size is 32 bits, registers must be pushed
+  // in blocks of four to maintain the 16-byte alignment of sp.
+ while (count >= 4) {
+ PushHelper(4, size, src, src, src, src);
+ count -= 4;
+ }
+ if (count >= 2) {
+ PushHelper(2, size, src, src, NoReg, NoReg);
+ count -= 2;
+ }
+ if (count == 1) {
+ PushHelper(1, size, src, NoReg, NoReg, NoReg);
+ count -= 1;
+ }
+ VIXL_ASSERT(count == 0);
+}
+
+
+void MacroAssembler::PushHelper(int count, int size,
+ const CPURegister& src0,
+ const CPURegister& src1,
+ const CPURegister& src2,
+ const CPURegister& src3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 stp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+ VIXL_ASSERT(size == src0.SizeInBytes());
+
+ // Pushing the stack pointer has unexpected behavior. See PushStackPointer().
+ VIXL_ASSERT(!src0.Is(GetStackPointer64()) && !src0.Is(sp));
+ VIXL_ASSERT(!src1.Is(GetStackPointer64()) && !src1.Is(sp));
+ VIXL_ASSERT(!src2.Is(GetStackPointer64()) && !src2.Is(sp));
+ VIXL_ASSERT(!src3.Is(GetStackPointer64()) && !src3.Is(sp));
+
+ // The JS engine should never push 4 bytes.
+ VIXL_ASSERT(size >= 8);
+
+ // When pushing multiple registers, the store order is chosen such that
+ // Push(a, b) is equivalent to Push(a) followed by Push(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+ str(src0, MemOperand(GetStackPointer64(), -1 * size, PreIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(src2.IsNone() && src3.IsNone());
+ stp(src1, src0, MemOperand(GetStackPointer64(), -2 * size, PreIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(src3.IsNone());
+ stp(src2, src1, MemOperand(GetStackPointer64(), -3 * size, PreIndex));
+ str(src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ case 4:
+ // Skip over 4 * size, then fill in the gap. This allows four W registers
+ // to be pushed using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
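+      // For example, pushing four W registers (size == 4) leaves src3 at the
+      // new sp, with src2, src1 and src0 at successively higher addresses.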
+ stp(src3, src2, MemOperand(GetStackPointer64(), -4 * size, PreIndex));
+ stp(src1, src0, MemOperand(GetStackPointer64(), 2 * size));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PopHelper(int count, int size,
+ const CPURegister& dst0,
+ const CPURegister& dst1,
+ const CPURegister& dst2,
+ const CPURegister& dst3) {
+ // Ensure that we don't unintentionally modify scratch or debug registers.
+ // Worst case for size is 2 ldp.
+ InstructionAccurateScope scope(this, 2,
+ InstructionAccurateScope::kMaximumSize);
+
+ VIXL_ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+ VIXL_ASSERT(size == dst0.SizeInBytes());
+
+ // When popping multiple registers, the load order is chosen such that
+ // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
+ switch (count) {
+ case 1:
+ VIXL_ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+ ldr(dst0, MemOperand(GetStackPointer64(), 1 * size, PostIndex));
+ break;
+ case 2:
+ VIXL_ASSERT(dst2.IsNone() && dst3.IsNone());
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 2 * size, PostIndex));
+ break;
+ case 3:
+ VIXL_ASSERT(dst3.IsNone());
+ ldr(dst2, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 3 * size, PostIndex));
+ break;
+ case 4:
+ // Load the higher addresses first, then load the lower addresses and skip
+ // the whole block in the second instruction. This allows four W registers
+ // to be popped using sp, whilst maintaining 16-byte alignment for sp at
+ // all times.
+ ldp(dst2, dst3, MemOperand(GetStackPointer64(), 2 * size));
+ ldp(dst0, dst1, MemOperand(GetStackPointer64(), 4 * size, PostIndex));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void MacroAssembler::PrepareForPush(int count, int size) {
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ } else {
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // the system stack pointer will still be modified in order to comply with
+ // ABI rules about accessing memory below the system stack pointer.
+ BumpSystemStackPointer(count * size);
+ }
+}
+
+
+void MacroAssembler::PrepareForPop(int count, int size) {
+ USE(count, size);
+ if (sp.Is(GetStackPointer64())) {
+ // If the current stack pointer is sp, then it must be aligned to 16 bytes
+ // on entry and the total size of the specified registers must also be a
+ // multiple of 16 bytes.
+ VIXL_ASSERT((count * size) % 16 == 0);
+ }
+}
+
+void MacroAssembler::Poke(const Register& src, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Str(src, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Peek(const Register& dst, const Operand& offset) {
+ if (offset.IsImmediate()) {
+ VIXL_ASSERT(offset.immediate() >= 0);
+ }
+
+ Ldr(dst, MemOperand(GetStackPointer64(), offset));
+}
+
+
+void MacroAssembler::Claim(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Sub(GetStackPointer64(), GetStackPointer64(), size);
+
+ // Make sure the real stack pointer reflects the claimed stack space.
+  // We can't use stack memory below the stack pointer, as it could be
+  // clobbered by interrupts and signal handlers.
+ if (!sp.Is(GetStackPointer64())) {
+ Mov(sp, GetStackPointer64());
+ }
+}
+
+
+void MacroAssembler::Drop(const Operand& size) {
+
+ if (size.IsZero()) {
+ return;
+ }
+
+ if (size.IsImmediate()) {
+ VIXL_ASSERT(size.immediate() > 0);
+ if (sp.Is(GetStackPointer64())) {
+ VIXL_ASSERT((size.immediate() % 16) == 0);
+ }
+ }
+
+ Add(GetStackPointer64(), GetStackPointer64(), size);
+}
+
+
+void MacroAssembler::PushCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 stp will be emitted.
+  // TODO(all): Should we use GetCalleeSaved and SavedFP?
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, -2 * static_cast<int>(kXRegSizeInBytes), PreIndex);
+
+ stp(x29, x30, tos);
+ stp(x27, x28, tos);
+ stp(x25, x26, tos);
+ stp(x23, x24, tos);
+ stp(x21, x22, tos);
+ stp(x19, x20, tos);
+
+ stp(d14, d15, tos);
+ stp(d12, d13, tos);
+ stp(d10, d11, tos);
+ stp(d8, d9, tos);
+}
+
+
+void MacroAssembler::PopCalleeSavedRegisters() {
+ // Ensure that the macro-assembler doesn't use any scratch registers.
+ // 10 ldp will be emitted.
+  // TODO(all): Should we use GetCalleeSaved and SavedFP?
+ InstructionAccurateScope scope(this, 10);
+
+ // This method must not be called unless the current stack pointer is sp.
+ VIXL_ASSERT(sp.Is(GetStackPointer64()));
+
+ MemOperand tos(sp, 2 * kXRegSizeInBytes, PostIndex);
+
+ ldp(d8, d9, tos);
+ ldp(d10, d11, tos);
+ ldp(d12, d13, tos);
+ ldp(d14, d15, tos);
+
+ ldp(x19, x20, tos);
+ ldp(x21, x22, tos);
+ ldp(x23, x24, tos);
+ ldp(x25, x26, tos);
+ ldp(x27, x28, tos);
+ ldp(x29, x30, tos);
+}
+
+void MacroAssembler::LoadCPURegList(CPURegList registers,
+ const MemOperand& src) {
+ LoadStoreCPURegListHelper(kLoad, registers, src);
+}
+
+void MacroAssembler::StoreCPURegList(CPURegList registers,
+ const MemOperand& dst) {
+ LoadStoreCPURegListHelper(kStore, registers, dst);
+}
+
+
+void MacroAssembler::LoadStoreCPURegListHelper(LoadStoreCPURegListAction op,
+ CPURegList registers,
+ const MemOperand& mem) {
+ // We do not handle pre-indexing or post-indexing.
+ VIXL_ASSERT(!(mem.IsPreIndex() || mem.IsPostIndex()));
+ VIXL_ASSERT(!registers.Overlaps(tmp_list_));
+ VIXL_ASSERT(!registers.Overlaps(fptmp_list_));
+ VIXL_ASSERT(!registers.IncludesAliasOf(sp));
+
+ UseScratchRegisterScope temps(this);
+
+ MemOperand loc = BaseMemOperandForLoadStoreCPURegList(registers,
+ mem,
+ &temps);
+
+ while (registers.Count() >= 2) {
+ const CPURegister& dst0 = registers.PopLowestIndex();
+ const CPURegister& dst1 = registers.PopLowestIndex();
+ if (op == kStore) {
+ Stp(dst0, dst1, loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldp(dst0, dst1, loc);
+ }
+ loc.AddOffset(2 * registers.RegisterSizeInBytes());
+ }
+ if (!registers.IsEmpty()) {
+ if (op == kStore) {
+ Str(registers.PopLowestIndex(), loc);
+ } else {
+ VIXL_ASSERT(op == kLoad);
+ Ldr(registers.PopLowestIndex(), loc);
+ }
+ }
+}
+
+MemOperand MacroAssembler::BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope) {
+ // If necessary, pre-compute the base address for the accesses.
+ if (mem.IsRegisterOffset()) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+
+ } else if (mem.IsImmediateOffset()) {
+ int reg_size = registers.RegisterSizeInBytes();
+ int total_size = registers.TotalSizeInBytes();
+ int64_t min_offset = mem.offset();
+ int64_t max_offset = mem.offset() + std::max(0, total_size - 2 * reg_size);
+ if ((registers.Count() >= 2) &&
+ (!Assembler::IsImmLSPair(min_offset, WhichPowerOf2(reg_size)) ||
+ !Assembler::IsImmLSPair(max_offset, WhichPowerOf2(reg_size)))) {
+ Register reg_base = scratch_scope->AcquireX();
+ ComputeAddress(reg_base, mem);
+ return MemOperand(reg_base);
+ }
+ }
+
+ return mem;
+}
+
+void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
+ VIXL_ASSERT(!sp.Is(GetStackPointer64()));
+ // TODO: Several callers rely on this not using scratch registers, so we use
+ // the assembler directly here. However, this means that large immediate
+ // values of 'space' cannot be handled.
+ InstructionAccurateScope scope(this, 1);
+ sub(sp, GetStackPointer64(), space);
+}
+
+
+void MacroAssembler::Trace(TraceParameters parameters, TraceCommand command) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the trace pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kTraceLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kTraceOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceParamsOffset);
+ dc32(parameters);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kTraceCommandOffset);
+ dc32(command);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters, command);
+#endif
+}
+
+
+void MacroAssembler::Log(TraceParameters parameters) {
+
+#ifdef JS_SIMULATOR_ARM64
+ // The arguments to the log pseudo instruction need to be contiguous in
+ // memory, so make sure we don't try to emit a literal pool.
+ InstructionAccurateScope scope(this, kLogLength / kInstructionSize);
+
+ Label start;
+ bind(&start);
+
+ // Refer to simulator-a64.h for a description of the marker and its
+ // arguments.
+ hlt(kLogOpcode);
+
+ // VIXL_ASSERT(SizeOfCodeGeneratedSince(&start) == kLogParamsOffset);
+ dc32(parameters);
+#else
+ // Emit nothing on real hardware.
+ USE(parameters);
+#endif
+}
+
+
+void MacroAssembler::EnableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateEnable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateEnable);
+}
+
+
+void MacroAssembler::DisableInstrumentation() {
+ VIXL_ASSERT(!isprint(InstrumentStateDisable));
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, InstrumentStateDisable);
+}
+
+
+void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
+ VIXL_ASSERT(strlen(marker_name) == 2);
+
+ // We allow only printable characters in the marker names. Unprintable
+ // characters are reserved for controlling features of the instrumentation.
+ VIXL_ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+
+ InstructionAccurateScope scope(this, 1);
+ movn(xzr, (marker_name[1] << 8) | marker_name[0]);
+}
+
+
+void UseScratchRegisterScope::Open(MacroAssembler* masm) {
+ VIXL_ASSERT(!initialised_);
+ available_ = masm->TmpList();
+ availablefp_ = masm->FPTmpList();
+ old_available_ = available_->list();
+ old_availablefp_ = availablefp_->list();
+ VIXL_ASSERT(available_->type() == CPURegister::kRegister);
+ VIXL_ASSERT(availablefp_->type() == CPURegister::kVRegister);
+#ifdef DEBUG
+ initialised_ = true;
+#endif
+}
+
+
+void UseScratchRegisterScope::Close() {
+ if (available_) {
+ available_->set_list(old_available_);
+ available_ = NULL;
+ }
+ if (availablefp_) {
+ availablefp_->set_list(old_availablefp_);
+ availablefp_ = NULL;
+ }
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+
+UseScratchRegisterScope::UseScratchRegisterScope(MacroAssembler* masm) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+ Open(masm);
+}
+
+// This allows deferred (and optional) initialisation of the scope.
+UseScratchRegisterScope::UseScratchRegisterScope()
+ : available_(NULL), availablefp_(NULL),
+ old_available_(0), old_availablefp_(0) {
+#ifdef DEBUG
+ initialised_ = false;
+#endif
+}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ Close();
+}
+
+
+bool UseScratchRegisterScope::IsAvailable(const CPURegister& reg) const {
+ return available_->IncludesAliasOf(reg) || availablefp_->IncludesAliasOf(reg);
+}
+
+
+Register UseScratchRegisterScope::AcquireSameSizeAs(const Register& reg) {
+ int code = AcquireNextAvailable(available_).code();
+ return Register(code, reg.size());
+}
+
+
+FPRegister UseScratchRegisterScope::AcquireSameSizeAs(const FPRegister& reg) {
+ int code = AcquireNextAvailable(availablefp_).code();
+ return FPRegister(code, reg.size());
+}
+
+
+void UseScratchRegisterScope::Release(const CPURegister& reg) {
+ VIXL_ASSERT(initialised_);
+ if (reg.IsRegister()) {
+ ReleaseByCode(available_, reg.code());
+ } else if (reg.IsFPRegister()) {
+ ReleaseByCode(availablefp_, reg.code());
+ } else {
+ VIXL_ASSERT(reg.IsNone());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const CPURegList& list) {
+ VIXL_ASSERT(initialised_);
+ if (list.type() == CPURegister::kRegister) {
+    // Make sure that neither sp nor xzr is included in the list.
+ IncludeByRegList(available_, list.list() & ~(xzr.Bit() | sp.Bit()));
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ IncludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Include(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ VIXL_ASSERT(initialised_);
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+  // Make sure that neither sp nor xzr is included in the list.
+ include &= ~(xzr.Bit() | sp.Bit());
+
+ IncludeByRegList(available_, include);
+}
+
+
+void UseScratchRegisterScope::Include(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList include = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ IncludeByRegList(availablefp_, include);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegList& list) {
+ if (list.type() == CPURegister::kRegister) {
+ ExcludeByRegList(available_, list.list());
+ } else {
+ VIXL_ASSERT(list.type() == CPURegister::kVRegister);
+ ExcludeByRegList(availablefp_, list.list());
+ }
+}
+
+
+void UseScratchRegisterScope::Exclude(const Register& reg1,
+ const Register& reg2,
+ const Register& reg3,
+ const Register& reg4) {
+ RegList exclude = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(available_, exclude);
+}
+
+
+void UseScratchRegisterScope::Exclude(const FPRegister& reg1,
+ const FPRegister& reg2,
+ const FPRegister& reg3,
+ const FPRegister& reg4) {
+ RegList excludefp = reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit();
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::Exclude(const CPURegister& reg1,
+ const CPURegister& reg2,
+ const CPURegister& reg3,
+ const CPURegister& reg4) {
+ RegList exclude = 0;
+ RegList excludefp = 0;
+
+ const CPURegister regs[] = {reg1, reg2, reg3, reg4};
+
+ for (unsigned i = 0; i < (sizeof(regs) / sizeof(regs[0])); i++) {
+ if (regs[i].IsRegister()) {
+ exclude |= regs[i].Bit();
+ } else if (regs[i].IsFPRegister()) {
+ excludefp |= regs[i].Bit();
+ } else {
+ VIXL_ASSERT(regs[i].IsNone());
+ }
+ }
+
+ ExcludeByRegList(available_, exclude);
+ ExcludeByRegList(availablefp_, excludefp);
+}
+
+
+void UseScratchRegisterScope::ExcludeAll() {
+ ExcludeByRegList(available_, available_->list());
+ ExcludeByRegList(availablefp_, availablefp_->list());
+}
+
+
+CPURegister UseScratchRegisterScope::AcquireNextAvailable(
+ CPURegList* available) {
+ VIXL_CHECK(!available->IsEmpty());
+ CPURegister result = available->PopLowestIndex();
+ VIXL_ASSERT(!AreAliased(result, xzr, sp));
+ return result;
+}
+
+
+void UseScratchRegisterScope::ReleaseByCode(CPURegList* available, int code) {
+ ReleaseByRegList(available, static_cast<RegList>(1) << code);
+}
+
+
+void UseScratchRegisterScope::ReleaseByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::IncludeByRegList(CPURegList* available,
+ RegList regs) {
+ available->set_list(available->list() | regs);
+}
+
+
+void UseScratchRegisterScope::ExcludeByRegList(CPURegList* available,
+ RegList exclude) {
+ available->set_list(available->list() & ~exclude);
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MacroAssembler-vixl.h b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
new file mode 100644
index 0000000000..232643be66
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MacroAssembler-vixl.h
@@ -0,0 +1,2516 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_MACRO_ASSEMBLER_A64_H_
+#define VIXL_A64_MACRO_ASSEMBLER_A64_H_
+
+#include <algorithm>
+#include <limits>
+
+#include "jit/arm64/Assembler-arm64.h"
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+
+#define LS_MACRO_LIST(V) \
+ V(Ldrb, Register&, rt, LDRB_w) \
+ V(Strb, Register&, rt, STRB_w) \
+ V(Ldrsb, Register&, rt, rt.Is64Bits() ? LDRSB_x : LDRSB_w) \
+ V(Ldrh, Register&, rt, LDRH_w) \
+ V(Strh, Register&, rt, STRH_w) \
+ V(Ldrsh, Register&, rt, rt.Is64Bits() ? LDRSH_x : LDRSH_w) \
+ V(Ldr, CPURegister&, rt, LoadOpFor(rt)) \
+ V(Str, CPURegister&, rt, StoreOpFor(rt)) \
+ V(Ldrsw, Register&, rt, LDRSW_x)
+
+
+#define LSPAIR_MACRO_LIST(V) \
+ V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2)) \
+ V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
+ V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+
+namespace vixl {
+
+// Forward declarations
+class MacroAssembler;
+class UseScratchRegisterScope;
+
+// This scope has the following purposes:
+// * Acquire/Release the underlying assembler's code buffer.
+// * This is mandatory before emitting.
+// * Emit the literal or veneer pools if necessary before emitting the
+// macro-instruction.
+// * Ensure there is enough space to emit the macro-instruction.
+class EmissionCheckScope {
+ public:
+ EmissionCheckScope(MacroAssembler* masm, size_t size)
+ : masm_(masm)
+ { }
+
+ protected:
+ MacroAssembler* masm_;
+#ifdef DEBUG
+ Label start_;
+ size_t size_;
+#endif
+};
+
+
+// Helper for common Emission checks.
+// The macro-instruction maps to a single instruction.
+class SingleEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit SingleEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kInstructionSize) {}
+};
+
+
+// The macro-instruction is a "typical" macro-instruction. Typical macro-
+// instructions only emit a few instructions, "a few" being defined as 8 here.
+class MacroEmissionCheckScope : public EmissionCheckScope {
+ public:
+ explicit MacroEmissionCheckScope(MacroAssembler* masm)
+ : EmissionCheckScope(masm, kTypicalMacroInstructionMaxSize) {}
+
+ private:
+ static const size_t kTypicalMacroInstructionMaxSize = 8 * kInstructionSize;
+};
+
+
+enum BranchType {
+ // Copies of architectural conditions.
+  // The associated conditions can be used in place of these; the code will
+  // take care of reinterpreting them with the correct type.
+ integer_eq = eq,
+ integer_ne = ne,
+ integer_hs = hs,
+ integer_lo = lo,
+ integer_mi = mi,
+ integer_pl = pl,
+ integer_vs = vs,
+ integer_vc = vc,
+ integer_hi = hi,
+ integer_ls = ls,
+ integer_ge = ge,
+ integer_lt = lt,
+ integer_gt = gt,
+ integer_le = le,
+ integer_al = al,
+ integer_nv = nv,
+
+ // These two are *different* from the architectural codes al and nv.
+ // 'always' is used to generate unconditional branches.
+ // 'never' is used to not generate a branch (generally as the inverse
+  // branch type of 'always').
+ always, never,
+ // cbz and cbnz
+ reg_zero, reg_not_zero,
+ // tbz and tbnz
+ reg_bit_clear, reg_bit_set,
+
+ // Aliases.
+ kBranchTypeFirstCondition = eq,
+ kBranchTypeLastCondition = nv,
+ kBranchTypeFirstUsingReg = reg_zero,
+ kBranchTypeFirstUsingBit = reg_bit_clear
+};
+
+
+enum DiscardMoveMode { kDontDiscardForSameWReg, kDiscardForSameWReg };
+
+// The macro assembler supports automatically pre-shifting immediates for
+// arithmetic and logical instructions, and then applying a post-shift in the
+// instruction to undo the modification, in order to reduce the code emitted
+// for an operation. For example:
+//
+// Add(x0, x0, 0x1f7de) => movz x16, 0xfbef; add x0, x0, x16, lsl #1.
+//
+// This optimisation can only be partially applied when the stack pointer is an
+// operand or destination, so this enumeration is used to control the shift.
+enum PreShiftImmMode {
+ kNoShift, // Don't pre-shift.
+ kLimitShiftForSP, // Limit pre-shift for add/sub extend use.
+ kAnyShift // Allow any pre-shift.
+};
+
+
+class MacroAssembler : public js::jit::Assembler {
+ public:
+ MacroAssembler();
+
+ // Finalize a code buffer of generated instructions. This function must be
+ // called before executing or copying code from the buffer.
+ void FinalizeCode();
+
+
+  // Constant generation helpers.
+  // These functions return the number of instructions required to move the
+  // immediate into the destination register. Also, if the masm pointer is
+  // non-null, they generate the code to do so.
+  // The two features are implemented in one function to avoid duplicating the
+  // logic.
+  // These functions can be used to evaluate the cost of synthesizing an
+  // immediate using 'mov immediate' instructions. A user might prefer loading
+  // a constant from the literal pool instead of using multiple 'mov immediate'
+  // instructions.
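+  // As an illustrative note (not from the original comments): calling
+  // MoveImmediateHelper with a null masm pointer yields only the instruction
+  // count, without emitting any code.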
+ static int MoveImmediateHelper(MacroAssembler* masm,
+ const Register &rd,
+ uint64_t imm);
+ static bool OneInstrMoveImmediateHelper(MacroAssembler* masm,
+ const Register& dst,
+ int64_t imm);
+
+
+ // Logical macros.
+ void And(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ands(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bic(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Bics(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orr(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Orn(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eor(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Eon(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Tst(const Register& rn, const Operand& operand);
+ void LogicalMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ LogicalOp op);
+
+ // Add and sub macros.
+ void Add(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Adds(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sub(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S = LeaveFlags);
+ void Subs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Cmn(const Register& rn, const Operand& operand);
+ void Cmp(const Register& rn, const Operand& operand);
+ void Neg(const Register& rd,
+ const Operand& operand);
+ void Negs(const Register& rd,
+ const Operand& operand);
+
+ void AddSubMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubOp op);
+
+ // Add/sub with carry macros.
+ void Adc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Adcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbc(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Sbcs(const Register& rd,
+ const Register& rn,
+ const Operand& operand);
+ void Ngc(const Register& rd,
+ const Operand& operand);
+ void Ngcs(const Register& rd,
+ const Operand& operand);
+ void AddSubWithCarryMacro(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ FlagsUpdate S,
+ AddSubWithCarryOp op);
+
+ // Move macros.
+ void Mov(const Register& rd, uint64_t imm);
+ void Mov(const Register& rd,
+ const Operand& operand,
+ DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+ void Mvn(const Register& rd, uint64_t imm) {
+ Mov(rd, (rd.size() == kXRegSize) ? ~imm : (~imm & kWRegMask));
+ }
+ void Mvn(const Register& rd, const Operand& operand);
+
+ // Try to move an immediate into the destination register in a single
+  // instruction. Returns true on success and updates the contents of dst.
+  // Returns false otherwise.
+ bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+
+ // Move an immediate into register dst, and return an Operand object for
+ // use with a subsequent instruction that accepts a shift. The value moved
+ // into dst is not necessarily equal to imm; it may have had a shifting
+ // operation applied to it that will be subsequently undone by the shift
+ // applied in the Operand.
+ Operand MoveImmediateForShiftedOp(const Register& dst,
+ int64_t imm,
+ PreShiftImmMode mode);
+
+ // Synthesises the address represented by a MemOperand into a register.
+ void ComputeAddress(const Register& dst, const MemOperand& mem_op);
+
+ // Conditional macros.
+ void Ccmp(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void Ccmn(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond);
+ void ConditionalCompareMacro(const Register& rn,
+ const Operand& operand,
+ StatusFlags nzcv,
+ Condition cond,
+ ConditionalCompareOp op);
+ void Csel(const Register& rd,
+ const Register& rn,
+ const Operand& operand,
+ Condition cond);
+
+ // Load/store macros.
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, OP) \
+ void FN(const REGTYPE REG, const MemOperand& addr);
+ LS_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStoreMacro(const CPURegister& rt,
+ const MemOperand& addr,
+ LoadStoreOp op);
+
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+ void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+ LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+ void LoadStorePairMacro(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& addr,
+ LoadStorePairOp op);
+
+ void Prfm(PrefetchOperation op, const MemOperand& addr);
+
+ // Push or pop up to 4 registers of the same width to or from the stack,
+ // using the current stack pointer as set by SetStackPointer.
+ //
+ // If an argument register is 'NoReg', all further arguments are also assumed
+ // to be 'NoReg', and are thus not pushed or popped.
+ //
+ // Arguments are ordered such that "Push(a, b);" is functionally equivalent
+ // to "Push(a); Push(b);".
+ //
+ // It is valid to push the same register more than once, and there is no
+ // restriction on the order in which registers are specified.
+ //
+ // It is not valid to pop into the same register more than once in one
+ // operation, not even into the zero register.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes on entry and the total size of the specified
+ // registers must also be a multiple of 16 bytes.
+ //
+ // Even if the current stack pointer is not the system stack pointer (sp),
+ // Push (and derived methods) will still modify the system stack pointer in
+ // order to comply with ABI rules about accessing memory below the system
+ // stack pointer.
+ //
+ // Other than the registers passed into Pop, the stack pointer and (possibly)
+ // the system stack pointer, these methods do not modify any other registers.
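+  //
+  // For example (an illustrative note): registers saved with Push(x0, x1) are
+  // restored with Pop(x1, x0).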
+ void Push(const CPURegister& src0, const CPURegister& src1 = NoReg,
+ const CPURegister& src2 = NoReg, const CPURegister& src3 = NoReg);
+ void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
+ const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+ void PushStackPointer();
+
+ // Alternative forms of Push and Pop, taking a RegList or CPURegList that
+ // specifies the registers that are to be pushed or popped. Higher-numbered
+ // registers are associated with higher memory addresses (as in the A32 push
+ // and pop instructions).
+ //
+ // (Push|Pop)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Push|Pop)(CPU|X|W|D|S)RegList is preferred.
+ void PushCPURegList(CPURegList registers);
+ void PopCPURegList(CPURegList registers);
+
+ void PushSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PushCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PopSizeRegList(RegList registers, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PopCPURegList(CPURegList(type, reg_size, registers));
+ }
+ void PushXRegList(RegList regs) {
+ PushSizeRegList(regs, kXRegSize);
+ }
+ void PopXRegList(RegList regs) {
+ PopSizeRegList(regs, kXRegSize);
+ }
+ void PushWRegList(RegList regs) {
+ PushSizeRegList(regs, kWRegSize);
+ }
+ void PopWRegList(RegList regs) {
+ PopSizeRegList(regs, kWRegSize);
+ }
+ void PushDRegList(RegList regs) {
+ PushSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PopDRegList(RegList regs) {
+ PopSizeRegList(regs, kDRegSize, CPURegister::kVRegister);
+ }
+ void PushSRegList(RegList regs) {
+ PushSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+ void PopSRegList(RegList regs) {
+ PopSizeRegList(regs, kSRegSize, CPURegister::kVRegister);
+ }
+
+ // Push the specified register 'count' times.
+ void PushMultipleTimes(int count, Register src);
+
+ // Poke 'src' onto the stack. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Poke(const Register& src, const Operand& offset);
+
+ // Peek at a value on the stack, and put it in 'dst'. The offset is in bytes.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then sp
+ // must be aligned to 16 bytes.
+ void Peek(const Register& dst, const Operand& offset);
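+
+  // For example (an illustrative note): Poke(x0, 8) stores x0 at
+  // [StackPointer() + 8], and Peek(x0, 8) loads it back.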
+
+ // Alternative forms of Peek and Poke, taking a RegList or CPURegList that
+  // specifies the registers that are to be poked or peeked. Higher-numbered
+ // registers are associated with higher memory addresses.
+ //
+ // (Peek|Poke)SizeRegList allow you to specify the register size as a
+ // parameter. Only kXRegSize, kWRegSize, kDRegSize and kSRegSize are
+ // supported.
+ //
+ // Otherwise, (Peek|Poke)(CPU|X|W|D|S)RegList is preferred.
+ void PeekCPURegList(CPURegList registers, int64_t offset) {
+ LoadCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+ void PokeCPURegList(CPURegList registers, int64_t offset) {
+ StoreCPURegList(registers, MemOperand(StackPointer(), offset));
+ }
+
+ void PeekSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PeekCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PokeSizeRegList(RegList registers, int64_t offset, unsigned reg_size,
+ CPURegister::RegisterType type = CPURegister::kRegister) {
+ PokeCPURegList(CPURegList(type, reg_size, registers), offset);
+ }
+ void PeekXRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kXRegSize);
+ }
+ void PokeXRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kXRegSize);
+ }
+ void PeekWRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kWRegSize);
+ }
+ void PokeWRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kWRegSize);
+ }
+ void PeekDRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PokeDRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kDRegSize, CPURegister::kVRegister);
+ }
+ void PeekSRegList(RegList regs, int64_t offset) {
+ PeekSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
+ void PokeSRegList(RegList regs, int64_t offset) {
+ PokeSizeRegList(regs, offset, kSRegSize, CPURegister::kVRegister);
+ }
+
+
+ // Claim or drop stack space without actually accessing memory.
+ //
+ // If the current stack pointer (as set by SetStackPointer) is sp, then it
+ // must be aligned to 16 bytes and the size claimed or dropped must be a
+ // multiple of 16 bytes.
+ void Claim(const Operand& size);
+ void Drop(const Operand& size);
+
+ // Preserve the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are pushed before lower-numbered registers, and
+ // thus get higher addresses.
+ // Floating-point registers are pushed before general-purpose registers, and
+ // thus get higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PushCalleeSavedRegisters();
+
+ // Restore the callee-saved registers (as defined by AAPCS64).
+ //
+ // Higher-numbered registers are popped after lower-numbered registers, and
+ // thus come from higher addresses.
+ // Floating-point registers are popped after general-purpose registers, and
+ // thus come from higher addresses.
+ //
+ // This method must not be called unless StackPointer() is sp, and it is
+ // aligned to 16 bytes.
+ void PopCalleeSavedRegisters();
+
+ void LoadCPURegList(CPURegList registers, const MemOperand& src);
+ void StoreCPURegList(CPURegList registers, const MemOperand& dst);
+
+ // Remaining instructions are simple pass-through calls to the assembler.
+ void Adr(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adr(rd, label);
+ }
+ void Adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ adrp(rd, label);
+ }
+ void Asr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asr(rd, rn, shift);
+ }
+ void Asr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ asrv(rd, rn, rm);
+ }
+
+ // Branch type inversion relies on these relations.
+ VIXL_STATIC_ASSERT((reg_zero == (reg_not_zero ^ 1)) &&
+ (reg_bit_clear == (reg_bit_set ^ 1)) &&
+ (always == (never ^ 1)));
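+
+  // For example, InvertBranchType(reg_zero) returns reg_not_zero, and
+  // InvertBranchType(always) returns never.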
+
+ BranchType InvertBranchType(BranchType type) {
+ if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
+ return static_cast<BranchType>(
+ InvertCondition(static_cast<Condition>(type)));
+ } else {
+ return static_cast<BranchType>(type ^ 1);
+ }
+ }
+
+ void B(Label* label, BranchType type, Register reg = NoReg, int bit = -1);
+
+ void B(Label* label);
+ void B(Label* label, Condition cond);
+ void B(Condition cond, Label* label) {
+ B(label, cond);
+ }
+ void Bfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfm(rd, rn, immr, imms);
+ }
+ void Bfi(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfi(rd, rn, lsb, width);
+ }
+ void Bfxil(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ bfxil(rd, rn, lsb, width);
+ }
+ void Bind(Label* label);
+ // Bind a label to a specified offset from the start of the buffer.
+ void BindToOffset(Label* label, ptrdiff_t offset);
+ void Bl(Label* label) {
+ SingleEmissionCheckScope guard(this);
+ bl(label);
+ }
+ void Blr(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ blr(xn);
+ }
+ void Br(const Register& xn) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ br(xn);
+ }
+ void Brk(int code = 0) {
+ SingleEmissionCheckScope guard(this);
+ brk(code);
+ }
+ void Cbnz(const Register& rt, Label* label);
+ void Cbz(const Register& rt, Label* label);
+ void Cinc(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinc(rd, rn, cond);
+ }
+ void Cinv(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cinv(rd, rn, cond);
+ }
+ void Clrex() {
+ SingleEmissionCheckScope guard(this);
+ clrex();
+ }
+ void Cls(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cls(rd, rn);
+ }
+ void Clz(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ clz(rd, rn);
+ }
+ void Cneg(const Register& rd, const Register& rn, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cneg(rd, rn, cond);
+ }
+ void Cset(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ cset(rd, cond);
+ }
+ void Csetm(const Register& rd, Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ csetm(rd, cond);
+ }
+ void Csinc(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+    // The VIXL source code contains these assertions, but the AArch64 ISA
+ // explicitly permits the use of zero registers. CSET itself is defined
+ // in terms of CSINC with WZR/XZR.
+ //
+ // VIXL_ASSERT(!rn.IsZero());
+ // VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinc(rd, rn, rm, cond);
+ }
+ void Csinv(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csinv(rd, rn, rm, cond);
+ }
+ void Csneg(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ Condition cond) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ csneg(rd, rn, rm, cond);
+ }
+ void Dmb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dmb(domain, type);
+ }
+ void Dsb(BarrierDomain domain, BarrierType type) {
+ SingleEmissionCheckScope guard(this);
+ dsb(domain, type);
+ }
+ void Extr(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ unsigned lsb) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ extr(rd, rn, rm, lsb);
+ }
+ void Fadd(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fadd(vd, vn, vm);
+ }
+ void Fccmp(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond,
+ FPTrapFlags trap = DisableTrap) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ FPCCompareMacro(vn, vm, nzcv, cond, trap);
+ }
+ void Fccmpe(const VRegister& vn,
+ const VRegister& vm,
+ StatusFlags nzcv,
+ Condition cond) {
+ Fccmp(vn, vm, nzcv, cond, EnableTrap);
+ }
+ void Fcmp(const VRegister& vn, const VRegister& vm,
+ FPTrapFlags trap = DisableTrap) {
+ SingleEmissionCheckScope guard(this);
+ FPCompareMacro(vn, vm, trap);
+ }
+ void Fcmp(const VRegister& vn, double value,
+ FPTrapFlags trap = DisableTrap);
+ void Fcmpe(const VRegister& vn, double value);
+ void Fcmpe(const VRegister& vn, const VRegister& vm) {
+ Fcmp(vn, vm, EnableTrap);
+ }
+ void Fcsel(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ Condition cond) {
+ VIXL_ASSERT((cond != al) && (cond != nv));
+ SingleEmissionCheckScope guard(this);
+ fcsel(vd, vn, vm, cond);
+ }
+ void Fcvt(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvt(vd, vn);
+ }
+ void Fcvtl(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl(vd, vn);
+ }
+ void Fcvtl2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtl2(vd, vn);
+ }
+ void Fcvtn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn(vd, vn);
+ }
+ void Fcvtn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtn2(vd, vn);
+ }
+ void Fcvtxn(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn(vd, vn);
+ }
+ void Fcvtxn2(const VRegister& vd, const VRegister& vn) {
+ SingleEmissionCheckScope guard(this);
+ fcvtxn2(vd, vn);
+ }
+ void Fcvtas(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtas(rd, vn);
+ }
+ void Fcvtau(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtau(rd, vn);
+ }
+ void Fcvtms(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtms(rd, vn);
+ }
+ void Fcvtmu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtmu(rd, vn);
+ }
+ void Fcvtns(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtns(rd, vn);
+ }
+ void Fcvtnu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtnu(rd, vn);
+ }
+ void Fcvtps(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtps(rd, vn);
+ }
+ void Fcvtpu(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtpu(rd, vn);
+ }
+ void Fcvtzs(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(rd, vn, fbits);
+ }
+ void Fjcvtzs(const Register& rd, const VRegister& vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fjcvtzs(rd, vn);
+ }
+ void Fcvtzu(const Register& rd, const VRegister& vn, int fbits = 0) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(rd, vn, fbits);
+ }
+ void Fdiv(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fdiv(vd, vn, vm);
+ }
+ void Fmax(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmax(vd, vn, vm);
+ }
+ void Fmaxnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmaxnm(vd, vn, vm);
+ }
+ void Fmin(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmin(vd, vn, vm);
+ }
+ void Fminnm(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fminnm(vd, vn, vm);
+ }
+ void Fmov(VRegister vd, VRegister vn) {
+ SingleEmissionCheckScope guard(this);
+    // Elide the instruction only when vd and vn are the same register and they
+    // are both D registers. fmov(s0, s0) is not a no-op because it clears the
+    // top word of d0. Technically, fmov(d0, d0) is not a no-op either because
+    // it clears the top of q0, but VRegister does not currently support Q
+    // registers.
+ if (!vd.Is(vn) || !vd.Is64Bits()) {
+ fmov(vd, vn);
+ }
+ }
+ void Fmov(VRegister vd, Register rn) {
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, rn);
+ }
+ void Fmov(const VRegister& vd, int index, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ fmov(vd, index, rn);
+ }
+ void Fmov(const Register& rd, const VRegister& vn, int index) {
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn, index);
+ }
+
+ // Provide explicit double and float interfaces for FP immediate moves, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+ // preserved when the immediate matches the format of vd. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ void Fmov(VRegister vd, double imm);
+ void Fmov(VRegister vd, float imm);
+ // Provide a template to allow other types to be converted automatically.
+ template<typename T>
+ void Fmov(VRegister vd, T imm) {
+ Fmov(vd, static_cast<double>(imm));
+ }
+ void Fmov(Register rd, VRegister vn) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ fmov(rd, vn);
+ }
+ void Fmul(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fmul(vd, vn, vm);
+ }
+ void Fnmul(const VRegister& vd, const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fnmul(vd, vn, vm);
+ }
+ void Fmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmadd(vd, vn, vm, va);
+ }
+ void Fmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fmsub(vd, vn, vm, va);
+ }
+ void Fnmadd(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmadd(vd, vn, vm, va);
+ }
+ void Fnmsub(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ const VRegister& va) {
+ SingleEmissionCheckScope guard(this);
+ fnmsub(vd, vn, vm, va);
+ }
+ void Fsub(const VRegister& vd, const VRegister& vn, const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ fsub(vd, vn, vm);
+ }
+ void Hint(SystemHint code) {
+ SingleEmissionCheckScope guard(this);
+ hint(code);
+ }
+ void Hlt(int code) {
+ SingleEmissionCheckScope guard(this);
+ hlt(code);
+ }
+ void Isb() {
+ SingleEmissionCheckScope guard(this);
+ isb();
+ }
+ void Ldar(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldar(rt, src);
+ }
+ void Ldarb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarb(rt, src);
+ }
+ void Ldarh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldarh(rt, src);
+ }
+ void Ldaxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldaxp(rt, rt2, src);
+ }
+ void Ldaxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxr(rt, src);
+ }
+ void Ldaxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrb(rt, src);
+ }
+ void Ldaxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldaxrh(rt, src);
+ }
+ void Ldnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldnp(rt, rt2, src);
+ }
+ // Provide both double and float interfaces for FP immediate loads, rather
+ // than relying on implicit C++ casts. This allows signalling NaNs to be
+  // preserved when the immediate matches the format of vt. Most systems convert
+ // signalling NaNs to quiet NaNs when converting between float and double.
+ void Ldr(const VRegister& vt, double imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is64Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<float>(imm));
+ }
+ }
+ void Ldr(const VRegister& vt, float imm) {
+ SingleEmissionCheckScope guard(this);
+ if (vt.Is32Bits()) {
+ ldr(vt, imm);
+ } else {
+ ldr(vt, static_cast<double>(imm));
+ }
+ }
+ /*
+ void Ldr(const VRegister& vt, uint64_t high64, uint64_t low64) {
+ VIXL_ASSERT(vt.IsQ());
+ SingleEmissionCheckScope guard(this);
+ ldr(vt, new Literal<uint64_t>(high64, low64,
+ &literal_pool_,
+ RawLiteral::kDeletedOnPlacementByPool));
+ }
+ */
+ void Ldr(const Register& rt, uint64_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldr(rt, imm);
+ }
+ void Ldrsw(const Register& rt, uint32_t imm) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ldrsw(rt, imm);
+ }
+ void Ldxp(const Register& rt, const Register& rt2, const MemOperand& src) {
+ VIXL_ASSERT(!rt.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ ldxp(rt, rt2, src);
+ }
+ void Ldxr(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxr(rt, src);
+ }
+ void Ldxrb(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrb(rt, src);
+ }
+ void Ldxrh(const Register& rt, const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ldxrh(rt, src);
+ }
+ void Lsl(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsl(rd, rn, shift);
+ }
+ void Lsl(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lslv(rd, rn, rm);
+ }
+ void Lsr(const Register& rd, const Register& rn, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsr(rd, rn, shift);
+ }
+ void Lsr(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ lsrv(rd, rn, rm);
+ }
+ void Madd(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ madd(rd, rn, rm, ra);
+ }
+ void Mneg(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mneg(rd, rn, rm);
+ }
+ void Mov(const Register& rd, const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, rn);
+ }
+ void Movk(const Register& rd, uint64_t imm, int shift = -1) {
+ VIXL_ASSERT(!rd.IsZero());
+ SingleEmissionCheckScope guard(this);
+ movk(rd, imm, shift);
+ }
+ void Mrs(const Register& rt, SystemRegister sysreg) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mrs(rt, sysreg);
+ }
+ void Msr(SystemRegister sysreg, const Register& rt) {
+ VIXL_ASSERT(!rt.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msr(sysreg, rt);
+ }
+ void Sys(int op1, int crn, int crm, int op2, const Register& rt = xzr) {
+ SingleEmissionCheckScope guard(this);
+ sys(op1, crn, crm, op2, rt);
+ }
+ void Dc(DataCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ dc(op, rt);
+ }
+ void Ic(InstructionCacheOp op, const Register& rt) {
+ SingleEmissionCheckScope guard(this);
+ ic(op, rt);
+ }
+ void Msub(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ msub(rd, rn, rm, ra);
+ }
+ void Mul(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ mul(rd, rn, rm);
+ }
+ void Nop() {
+ SingleEmissionCheckScope guard(this);
+ nop();
+ }
+ void Csdb() {
+ SingleEmissionCheckScope guard(this);
+ csdb();
+ }
+ void Rbit(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rbit(rd, rn);
+ }
+ void Ret(const Register& xn = lr) {
+ VIXL_ASSERT(!xn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ret(xn);
+ }
+ void Rev(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev(rd, rn);
+ }
+ void Rev16(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev16(rd, rn);
+ }
+ void Rev32(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rev32(rd, rn);
+ }
+ void Ror(const Register& rd, const Register& rs, unsigned shift) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rs.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ror(rd, rs, shift);
+ }
+ void Ror(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ rorv(rd, rn, rm);
+ }
+ void Sbfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfiz(rd, rn, lsb, width);
+ }
+ void Sbfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfm(rd, rn, immr, imms);
+ }
+ void Sbfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sbfx(rd, rn, lsb, width);
+ }
+ void Scvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, rn, fbits);
+ }
+ void Sdiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sdiv(rd, rn, rm);
+ }
+ void Smaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smaddl(rd, rn, rm, ra);
+ }
+ void Smsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smsubl(rd, rn, rm, ra);
+ }
+ void Smull(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smull(rd, rn, rm);
+ }
+ void Smulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ smulh(xd, xn, xm);
+ }
+ void Stlr(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlr(rt, dst);
+ }
+ void Stlrb(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrb(rt, dst);
+ }
+ void Stlrh(const Register& rt, const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stlrh(rt, dst);
+ }
+ void Stlxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stlxp(rs, rt, rt2, dst);
+ }
+ void Stlxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxr(rs, rt, dst);
+ }
+ void Stlxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrb(rs, rt, dst);
+ }
+ void Stlxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stlxrh(rs, rt, dst);
+ }
+ void Stnp(const CPURegister& rt,
+ const CPURegister& rt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ stnp(rt, rt2, dst);
+ }
+ void Stxp(const Register& rs,
+ const Register& rt,
+ const Register& rt2,
+ const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ VIXL_ASSERT(!rs.Aliases(rt2));
+ SingleEmissionCheckScope guard(this);
+ stxp(rs, rt, rt2, dst);
+ }
+ void Stxr(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxr(rs, rt, dst);
+ }
+ void Stxrb(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrb(rs, rt, dst);
+ }
+ void Stxrh(const Register& rs, const Register& rt, const MemOperand& dst) {
+ VIXL_ASSERT(!rs.Aliases(dst.base()));
+ VIXL_ASSERT(!rs.Aliases(rt));
+ SingleEmissionCheckScope guard(this);
+ stxrh(rs, rt, dst);
+ }
+ void Svc(int code) {
+ SingleEmissionCheckScope guard(this);
+ svc(code);
+ }
+ void Sxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtb(rd, rn);
+ }
+ void Sxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxth(rd, rn);
+ }
+ void Sxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ sxtw(rd, rn);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vm);
+ }
+ void Tbl(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbl(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vm);
+ }
+ void Tbx(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vn2,
+ const VRegister& vn3,
+ const VRegister& vn4,
+ const VRegister& vm) {
+ SingleEmissionCheckScope guard(this);
+ tbx(vd, vn, vn2, vn3, vn4, vm);
+ }
+ void Tbnz(const Register& rt, unsigned bit_pos, Label* label);
+ void Tbz(const Register& rt, unsigned bit_pos, Label* label);
+ void Ubfiz(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfiz(rd, rn, lsb, width);
+ }
+ void Ubfm(const Register& rd,
+ const Register& rn,
+ unsigned immr,
+ unsigned imms) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfm(rd, rn, immr, imms);
+ }
+ void Ubfx(const Register& rd,
+ const Register& rn,
+ unsigned lsb,
+ unsigned width) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ubfx(rd, rn, lsb, width);
+ }
+ void Ucvtf(const VRegister& vd, const Register& rn, int fbits = 0) {
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, rn, fbits);
+ }
+ void Udiv(const Register& rd, const Register& rn, const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ udiv(rd, rn, rm);
+ }
+ void Umaddl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umaddl(rd, rn, rm, ra);
+ }
+ void Umull(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umull(rd, rn, rm);
+ }
+ void Umulh(const Register& xd, const Register& xn, const Register& xm) {
+ VIXL_ASSERT(!xd.IsZero());
+ VIXL_ASSERT(!xn.IsZero());
+ VIXL_ASSERT(!xm.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umulh(xd, xn, xm);
+ }
+ void Umsubl(const Register& rd,
+ const Register& rn,
+ const Register& rm,
+ const Register& ra) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ VIXL_ASSERT(!rm.IsZero());
+ VIXL_ASSERT(!ra.IsZero());
+ SingleEmissionCheckScope guard(this);
+ umsubl(rd, rn, rm, ra);
+ }
+
+ void Unreachable() {
+ SingleEmissionCheckScope guard(this);
+ Emit(UNDEFINED_INST_PATTERN);
+ }
+
+ void Uxtb(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtb(rd, rn);
+ }
+ void Uxth(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxth(rd, rn);
+ }
+ void Uxtw(const Register& rd, const Register& rn) {
+ VIXL_ASSERT(!rd.IsZero());
+ VIXL_ASSERT(!rn.IsZero());
+ SingleEmissionCheckScope guard(this);
+ uxtw(rd, rn);
+ }
+
+ // NEON 3 vector register instructions.
+ #define NEON_3VREG_MACRO_LIST(V) \
+ V(add, Add) \
+ V(addhn, Addhn) \
+ V(addhn2, Addhn2) \
+ V(addp, Addp) \
+ V(and_, And) \
+ V(bic, Bic) \
+ V(bif, Bif) \
+ V(bit, Bit) \
+ V(bsl, Bsl) \
+ V(cmeq, Cmeq) \
+ V(cmge, Cmge) \
+ V(cmgt, Cmgt) \
+ V(cmhi, Cmhi) \
+ V(cmhs, Cmhs) \
+ V(cmtst, Cmtst) \
+ V(eor, Eor) \
+ V(fabd, Fabd) \
+ V(facge, Facge) \
+ V(facgt, Facgt) \
+ V(faddp, Faddp) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxp, Fmaxp) \
+ V(fminnmp, Fminnmp) \
+ V(fminp, Fminp) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(frecps, Frecps) \
+ V(frsqrts, Frsqrts) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(mul, Mul) \
+ V(orn, Orn) \
+ V(orr, Orr) \
+ V(pmul, Pmul) \
+ V(pmull, Pmull) \
+ V(pmull2, Pmull2) \
+ V(raddhn, Raddhn) \
+ V(raddhn2, Raddhn2) \
+ V(rsubhn, Rsubhn) \
+ V(rsubhn2, Rsubhn2) \
+ V(saba, Saba) \
+ V(sabal, Sabal) \
+ V(sabal2, Sabal2) \
+ V(sabd, Sabd) \
+ V(sabdl, Sabdl) \
+ V(sabdl2, Sabdl2) \
+ V(saddl, Saddl) \
+ V(saddl2, Saddl2) \
+ V(saddw, Saddw) \
+ V(saddw2, Saddw2) \
+ V(shadd, Shadd) \
+ V(shsub, Shsub) \
+ V(smax, Smax) \
+ V(smaxp, Smaxp) \
+ V(smin, Smin) \
+ V(sminp, Sminp) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(sqadd, Sqadd) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqrshl, Sqrshl) \
+ V(sqshl, Sqshl) \
+ V(sqsub, Sqsub) \
+ V(srhadd, Srhadd) \
+ V(srshl, Srshl) \
+ V(sshl, Sshl) \
+ V(ssubl, Ssubl) \
+ V(ssubl2, Ssubl2) \
+ V(ssubw, Ssubw) \
+ V(ssubw2, Ssubw2) \
+ V(sub, Sub) \
+ V(subhn, Subhn) \
+ V(subhn2, Subhn2) \
+ V(trn1, Trn1) \
+ V(trn2, Trn2) \
+ V(uaba, Uaba) \
+ V(uabal, Uabal) \
+ V(uabal2, Uabal2) \
+ V(uabd, Uabd) \
+ V(uabdl, Uabdl) \
+ V(uabdl2, Uabdl2) \
+ V(uaddl, Uaddl) \
+ V(uaddl2, Uaddl2) \
+ V(uaddw, Uaddw) \
+ V(uaddw2, Uaddw2) \
+ V(uhadd, Uhadd) \
+ V(uhsub, Uhsub) \
+ V(umax, Umax) \
+ V(umaxp, Umaxp) \
+ V(umin, Umin) \
+ V(uminp, Uminp) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(uqadd, Uqadd) \
+ V(uqrshl, Uqrshl) \
+ V(uqshl, Uqshl) \
+ V(uqsub, Uqsub) \
+ V(urhadd, Urhadd) \
+ V(urshl, Urshl) \
+ V(ushl, Ushl) \
+ V(usubl, Usubl) \
+ V(usubl2, Usubl2) \
+ V(usubw, Usubw) \
+ V(usubw2, Usubw2) \
+ V(uzp1, Uzp1) \
+ V(uzp2, Uzp2) \
+ V(zip1, Zip1) \
+ V(zip2, Zip2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm); \
+ }
+ NEON_3VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON 2 vector register instructions.
+ #define NEON_2VREG_MACRO_LIST(V) \
+ V(abs, Abs) \
+ V(addp, Addp) \
+ V(addv, Addv) \
+ V(cls, Cls) \
+ V(clz, Clz) \
+ V(cnt, Cnt) \
+ V(fabs, Fabs) \
+ V(faddp, Faddp) \
+ V(fcvtas, Fcvtas) \
+ V(fcvtau, Fcvtau) \
+ V(fcvtms, Fcvtms) \
+ V(fcvtmu, Fcvtmu) \
+ V(fcvtns, Fcvtns) \
+ V(fcvtnu, Fcvtnu) \
+ V(fcvtps, Fcvtps) \
+ V(fcvtpu, Fcvtpu) \
+ V(fmaxnmp, Fmaxnmp) \
+ V(fmaxnmv, Fmaxnmv) \
+ V(fmaxp, Fmaxp) \
+ V(fmaxv, Fmaxv) \
+ V(fminnmp, Fminnmp) \
+ V(fminnmv, Fminnmv) \
+ V(fminp, Fminp) \
+ V(fminv, Fminv) \
+ V(fneg, Fneg) \
+ V(frecpe, Frecpe) \
+ V(frecpx, Frecpx) \
+ V(frinta, Frinta) \
+ V(frinti, Frinti) \
+ V(frintm, Frintm) \
+ V(frintn, Frintn) \
+ V(frintp, Frintp) \
+ V(frintx, Frintx) \
+ V(frintz, Frintz) \
+ V(frsqrte, Frsqrte) \
+ V(fsqrt, Fsqrt) \
+ V(mov, Mov) \
+ V(mvn, Mvn) \
+ V(neg, Neg) \
+ V(not_, Not) \
+ V(rbit, Rbit) \
+ V(rev16, Rev16) \
+ V(rev32, Rev32) \
+ V(rev64, Rev64) \
+ V(sadalp, Sadalp) \
+ V(saddlp, Saddlp) \
+ V(saddlv, Saddlv) \
+ V(smaxv, Smaxv) \
+ V(sminv, Sminv) \
+ V(sqabs, Sqabs) \
+ V(sqneg, Sqneg) \
+ V(sqxtn, Sqxtn) \
+ V(sqxtn2, Sqxtn2) \
+ V(sqxtun, Sqxtun) \
+ V(sqxtun2, Sqxtun2) \
+ V(suqadd, Suqadd) \
+ V(sxtl, Sxtl) \
+ V(sxtl2, Sxtl2) \
+ V(uadalp, Uadalp) \
+ V(uaddlp, Uaddlp) \
+ V(uaddlv, Uaddlv) \
+ V(umaxv, Umaxv) \
+ V(uminv, Uminv) \
+ V(uqxtn, Uqxtn) \
+ V(uqxtn2, Uqxtn2) \
+ V(urecpe, Urecpe) \
+ V(ursqrte, Ursqrte) \
+ V(usqadd, Usqadd) \
+ V(uxtl, Uxtl) \
+ V(uxtl2, Uxtl2) \
+ V(xtn, Xtn) \
+ V(xtn2, Xtn2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn); \
+ }
+ NEON_2VREG_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON 2 vector register with immediate instructions.
+ #define NEON_2VREG_FPIMM_MACRO_LIST(V) \
+ V(fcmeq, Fcmeq) \
+ V(fcmge, Fcmge) \
+ V(fcmgt, Fcmgt) \
+ V(fcmle, Fcmle) \
+ V(fcmlt, Fcmlt)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ double imm) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, imm); \
+ }
+ NEON_2VREG_FPIMM_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ // NEON by element instructions.
+ #define NEON_BYELEMENT_MACRO_LIST(V) \
+ V(fmul, Fmul) \
+ V(fmla, Fmla) \
+ V(fmls, Fmls) \
+ V(fmulx, Fmulx) \
+ V(mul, Mul) \
+ V(mla, Mla) \
+ V(mls, Mls) \
+ V(sqdmulh, Sqdmulh) \
+ V(sqrdmulh, Sqrdmulh) \
+ V(sqdmull, Sqdmull) \
+ V(sqdmull2, Sqdmull2) \
+ V(sqdmlal, Sqdmlal) \
+ V(sqdmlal2, Sqdmlal2) \
+ V(sqdmlsl, Sqdmlsl) \
+ V(sqdmlsl2, Sqdmlsl2) \
+ V(smull, Smull) \
+ V(smull2, Smull2) \
+ V(smlal, Smlal) \
+ V(smlal2, Smlal2) \
+ V(smlsl, Smlsl) \
+ V(smlsl2, Smlsl2) \
+ V(umull, Umull) \
+ V(umull2, Umull2) \
+ V(umlal, Umlal) \
+ V(umlal2, Umlal2) \
+ V(umlsl, Umlsl) \
+ V(umlsl2, Umlsl2)
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ const VRegister& vm, \
+ int vm_index \
+ ) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, vm, vm_index); \
+ }
+ NEON_BYELEMENT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ #define NEON_2VREG_SHIFT_MACRO_LIST(V) \
+ V(rshrn, Rshrn) \
+ V(rshrn2, Rshrn2) \
+ V(shl, Shl) \
+ V(shll, Shll) \
+ V(shll2, Shll2) \
+ V(shrn, Shrn) \
+ V(shrn2, Shrn2) \
+ V(sli, Sli) \
+ V(sqrshrn, Sqrshrn) \
+ V(sqrshrn2, Sqrshrn2) \
+ V(sqrshrun, Sqrshrun) \
+ V(sqrshrun2, Sqrshrun2) \
+ V(sqshl, Sqshl) \
+ V(sqshlu, Sqshlu) \
+ V(sqshrn, Sqshrn) \
+ V(sqshrn2, Sqshrn2) \
+ V(sqshrun, Sqshrun) \
+ V(sqshrun2, Sqshrun2) \
+ V(sri, Sri) \
+ V(srshr, Srshr) \
+ V(srsra, Srsra) \
+ V(sshll, Sshll) \
+ V(sshll2, Sshll2) \
+ V(sshr, Sshr) \
+ V(ssra, Ssra) \
+ V(uqrshrn, Uqrshrn) \
+ V(uqrshrn2, Uqrshrn2) \
+ V(uqshl, Uqshl) \
+ V(uqshrn, Uqshrn) \
+ V(uqshrn2, Uqshrn2) \
+ V(urshr, Urshr) \
+ V(ursra, Ursra) \
+ V(ushll, Ushll) \
+ V(ushll2, Ushll2) \
+ V(ushr, Ushr) \
+ V(usra, Usra) \
+
+ #define DEFINE_MACRO_ASM_FUNC(ASM, MASM) \
+ void MASM(const VRegister& vd, \
+ const VRegister& vn, \
+ int shift) { \
+ SingleEmissionCheckScope guard(this); \
+ ASM(vd, vn, shift); \
+ }
+ NEON_2VREG_SHIFT_MACRO_LIST(DEFINE_MACRO_ASM_FUNC)
+ #undef DEFINE_MACRO_ASM_FUNC
+
+ void Bic(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ bic(vd, imm8, left_shift);
+ }
+ void Cmeq(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmeq(vd, vn, imm);
+ }
+ void Cmge(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmge(vd, vn, imm);
+ }
+ void Cmgt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmgt(vd, vn, imm);
+ }
+ void Cmle(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmle(vd, vn, imm);
+ }
+ void Cmlt(const VRegister& vd,
+ const VRegister& vn,
+ int imm) {
+ SingleEmissionCheckScope guard(this);
+ cmlt(vd, vn, imm);
+ }
+ void Dup(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, vn, index);
+ }
+ void Dup(const VRegister& vd,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ dup(vd, rn);
+ }
+ void Ext(const VRegister& vd,
+ const VRegister& vn,
+ const VRegister& vm,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ ext(vd, vn, vm, index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, vn, vn_index);
+ }
+ void Ins(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ ins(vd, vd_index, rn);
+ }
+ void Ld1(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, src);
+ }
+ void Ld1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, vt2, vt3, vt4, src);
+ }
+ void Ld1(const VRegister& vt,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1(vt, lane, src);
+ }
+ void Ld1r(const VRegister& vt,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld1r(vt, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, src);
+ }
+ void Ld2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2(vt, vt2, lane, src);
+ }
+ void Ld2r(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld2r(vt, vt2, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, src);
+ }
+ void Ld3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3(vt, vt2, vt3, lane, src);
+ }
+ void Ld3r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld3r(vt, vt2, vt3, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, src);
+ }
+ void Ld4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4(vt, vt2, vt3, vt4, lane, src);
+ }
+ void Ld4r(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& src) {
+ SingleEmissionCheckScope guard(this);
+ ld4r(vt, vt2, vt3, vt4, src);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, vn, vn_index);
+ }
+ void Mov(const VRegister& vd,
+ const VRegister& vn,
+ int index) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vn, index);
+ }
+ void Mov(const VRegister& vd,
+ int vd_index,
+ const Register& rn) {
+ SingleEmissionCheckScope guard(this);
+ mov(vd, vd_index, rn);
+ }
+ void Mov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ mov(rd, vn, vn_index);
+ }
+ void Movi(const VRegister& vd,
+ uint64_t imm,
+ Shift shift = LSL,
+ int shift_amount = 0);
+ void Movi(const VRegister& vd, uint64_t hi, uint64_t lo);
+ void Mvni(const VRegister& vd,
+ const int imm8,
+ Shift shift = LSL,
+ const int shift_amount = 0) {
+ SingleEmissionCheckScope guard(this);
+ mvni(vd, imm8, shift, shift_amount);
+ }
+ void Orr(const VRegister& vd,
+ const int imm8,
+ const int left_shift = 0) {
+ SingleEmissionCheckScope guard(this);
+ orr(vd, imm8, left_shift);
+ }
+ void Scvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ scvtf(vd, vn, fbits);
+ }
+ void Ucvtf(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ ucvtf(vd, vn, fbits);
+ }
+ void Fcvtzs(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzs(vd, vn, fbits);
+ }
+ void Fcvtzu(const VRegister& vd,
+ const VRegister& vn,
+ int fbits = 0) {
+ SingleEmissionCheckScope guard(this);
+ fcvtzu(vd, vn, fbits);
+ }
+ void St1(const VRegister& vt,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st1(vt, lane, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, dst);
+ }
+ void St2(const VRegister& vt,
+ const VRegister& vt2,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st2(vt, vt2, lane, dst);
+ }
+ void St3(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st3(vt, vt2, vt3, lane, dst);
+ }
+ void St4(const VRegister& vt,
+ const VRegister& vt2,
+ const VRegister& vt3,
+ const VRegister& vt4,
+ int lane,
+ const MemOperand& dst) {
+ SingleEmissionCheckScope guard(this);
+ st4(vt, vt2, vt3, vt4, lane, dst);
+ }
+ void Smov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ smov(rd, vn, vn_index);
+ }
+ void Umov(const Register& rd,
+ const VRegister& vn,
+ int vn_index) {
+ SingleEmissionCheckScope guard(this);
+ umov(rd, vn, vn_index);
+ }
+ void Crc32b(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32b(rd, rn, rm);
+ }
+ void Crc32h(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32h(rd, rn, rm);
+ }
+ void Crc32w(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32w(rd, rn, rm);
+ }
+ void Crc32x(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32x(rd, rn, rm);
+ }
+ void Crc32cb(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cb(rd, rn, rm);
+ }
+ void Crc32ch(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32ch(rd, rn, rm);
+ }
+ void Crc32cw(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cw(rd, rn, rm);
+ }
+ void Crc32cx(const Register& rd,
+ const Register& rn,
+ const Register& rm) {
+ SingleEmissionCheckScope guard(this);
+ crc32cx(rd, rn, rm);
+ }
+
+ // Push the system stack pointer (sp) down to allow the same to be done to
+ // the current stack pointer (according to StackPointer()). This must be
+ // called _before_ accessing the memory.
+ //
+ // This is necessary when pushing or otherwise adding things to the stack, to
+ // satisfy the AAPCS64 constraint that the memory below the system stack
+ // pointer is not accessed.
+ //
+ // This method asserts that StackPointer() is not sp, since the call does
+ // not make sense in that context.
+ //
+ // TODO: This method can only accept values of 'space' that can be encoded in
+ // one instruction. Refer to the implementation for details.
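+  //
+  // A usage sketch (illustrative only; x28 merely stands in for whatever
+  // non-sp register has been installed with SetStackPointer64()):
+  //
+  //   __ SetStackPointer64(x28);
+  //   __ BumpSystemStackPointer(Operand(16));
+  //   __ Str(x0, MemOperand(x28, -16, PreIndex));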
+ void BumpSystemStackPointer(const Operand& space);
+
+ // Set the current stack pointer, but don't generate any code.
+ void SetStackPointer64(const Register& stack_pointer) {
+ VIXL_ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+ sp_ = stack_pointer;
+ }
+
+ // Return the current stack pointer, as set by SetStackPointer.
+ const Register& StackPointer() const {
+ return sp_;
+ }
+
+ const Register& GetStackPointer64() const {
+ return sp_;
+ }
+
+ js::jit::RegisterOrSP getStackPointer() const {
+ return js::jit::RegisterOrSP(sp_.code());
+ }
+
+ CPURegList* TmpList() { return &tmp_list_; }
+ CPURegList* FPTmpList() { return &fptmp_list_; }
+
+ // Trace control when running the debug simulator.
+ //
+ // For example:
+ //
+ // __ Trace(LOG_REGS, TRACE_ENABLE);
+  //   Will add register values to the trace if they are not already being logged.
+ //
+ // __ Trace(LOG_DISASM, TRACE_DISABLE);
+ // Will stop logging disassembly. It has no effect if the disassembly wasn't
+ // already being logged.
+ void Trace(TraceParameters parameters, TraceCommand command);
+
+ // Log the requested data independently of what is being traced.
+ //
+ // For example:
+ //
+ // __ Log(LOG_FLAGS)
+ // Will output the flags.
+ void Log(TraceParameters parameters);
+
+ // Enable or disable instrumentation when an Instrument visitor is attached to
+ // the simulator.
+ void EnableInstrumentation();
+ void DisableInstrumentation();
+
+ // Add a marker to the instrumentation data produced by an Instrument visitor.
+ // The name is a two character string that will be attached to the marker in
+ // the output data.
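+  //
+  // For example (illustrative only):
+  //
+  //   __ AnnotateInstrumentation("IC");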
+ void AnnotateInstrumentation(const char* marker_name);
+
+ private:
+ // The actual Push and Pop implementations. These don't generate any code
+ // other than that required for the push or pop. This allows
+ // (Push|Pop)CPURegList to bundle together setup code for a large block of
+ // registers.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PushHelper(int count, int size,
+ const CPURegister& src0, const CPURegister& src1,
+ const CPURegister& src2, const CPURegister& src3);
+ void PopHelper(int count, int size,
+ const CPURegister& dst0, const CPURegister& dst1,
+ const CPURegister& dst2, const CPURegister& dst3);
+
+ void Movi16bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi32bitHelper(const VRegister& vd, uint64_t imm);
+ void Movi64bitHelper(const VRegister& vd, uint64_t imm);
+
+ // Perform necessary maintenance operations before a push or pop.
+ //
+ // Note that size is per register, and is specified in bytes.
+ void PrepareForPush(int count, int size);
+ void PrepareForPop(int count, int size);
+
+ // The actual implementation of load and store operations for CPURegList.
+ enum LoadStoreCPURegListAction {
+ kLoad,
+ kStore
+ };
+ void LoadStoreCPURegListHelper(LoadStoreCPURegListAction operation,
+ CPURegList registers,
+ const MemOperand& mem);
+  // Returns a MemOperand suitable for loading or storing a CPURegList at `mem`.
+ // This helper may allocate registers from `scratch_scope` and generate code
+ // to compute an intermediate address. The resulting MemOperand is only valid
+ // as long as `scratch_scope` remains valid.
+ MemOperand BaseMemOperandForLoadStoreCPURegList(
+ const CPURegList& registers,
+ const MemOperand& mem,
+ UseScratchRegisterScope* scratch_scope);
+
+ bool LabelIsOutOfRange(Label* label, ImmBranchType branch_type) {
+ return !Instruction::IsValidImmPCOffset(branch_type, nextOffset().getOffset() - label->offset());
+ }
+
+ // The register to use as a stack pointer for stack operations.
+ Register sp_;
+
+ // Scratch registers available for use by the MacroAssembler.
+ CPURegList tmp_list_;
+ CPURegList fptmp_list_;
+
+ ptrdiff_t checkpoint_;
+ ptrdiff_t recommended_checkpoint_;
+};
+
+
+// All Assembler emits MUST acquire/release the underlying code buffer. The
+// helper scope below does so, and can optionally ensure the buffer is big
+// enough to receive the emit. The check can be skipped (kNoCheck) if, for
+// example, the buffer is already known to be large enough or some other size
+// checking mechanism is in place.
+class CodeBufferCheckScope {
+ public:
+ // Tell whether or not the scope needs to ensure the associated CodeBuffer
+ // has enough space for the requested size.
+ enum CheckPolicy {
+ kNoCheck,
+ kCheck
+ };
+
+ // Tell whether or not the scope should assert the amount of code emitted
+ // within the scope is consistent with the requested amount.
+ enum AssertPolicy {
+ kNoAssert, // No assert required.
+ kExactSize, // The code emitted must be exactly size bytes.
+ kMaximumSize // The code emitted must be at most size bytes.
+ };
+
+ CodeBufferCheckScope(Assembler* assm,
+ size_t size,
+ CheckPolicy check_policy = kCheck,
+ AssertPolicy assert_policy = kMaximumSize)
+ { }
+
+ // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
+ explicit CodeBufferCheckScope(Assembler* assm) {}
+};
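+
+// A usage sketch (illustrative only, not part of this interface): reserving
+// buffer space for two raw Assembler emits within a checked scope.
+//
+//   CodeBufferCheckScope scope(&assm, 2 * kInstructionSize);
+//   assm.add(x0, x1, Operand(x2));
+//   assm.ret(lr);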
+
+
+// Use this scope when you need a one-to-one mapping between methods and
+// instructions. This scope prevents the MacroAssembler from being called and
+// literal pools from being emitted. It also asserts the number of instructions
+// emitted is what you specified when creating the scope.
+// FIXME: Because of the disabled calls below, this class asserts nothing.
+class InstructionAccurateScope : public CodeBufferCheckScope {
+ public:
+ InstructionAccurateScope(MacroAssembler* masm,
+ int64_t count,
+ AssertPolicy policy = kExactSize)
+ : CodeBufferCheckScope(masm,
+ (count * kInstructionSize),
+ kCheck,
+ policy) {
+ }
+};
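+
+// A usage sketch (illustrative only; `masm` is assumed to be a
+// MacroAssembler*): emitting exactly one raw instruction under the scope.
+//
+//   {
+//     InstructionAccurateScope scope(masm, 1);
+//     masm->ret(lr);
+//   }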
+
+
+// This scope utility allows scratch registers to be managed safely. The
+// MacroAssembler's TmpList() (and FPTmpList()) is used as a pool of scratch
+// registers. These registers can be allocated on demand, and will be returned
+// at the end of the scope.
+//
+// When the scope ends, the MacroAssembler's lists will be restored to their
+// original state, even if the lists were modified by some other means.
+class UseScratchRegisterScope {
+ public:
+ // This constructor implicitly calls the `Open` function to initialise the
+ // scope, so it is ready to use immediately after it has been constructed.
+ explicit UseScratchRegisterScope(MacroAssembler* masm);
+ // This constructor allows deferred and optional initialisation of the scope.
+ // The user is required to explicitly call the `Open` function before using
+ // the scope.
+ UseScratchRegisterScope();
+ // This function performs the actual initialisation work.
+ void Open(MacroAssembler* masm);
+
+ // The destructor always implicitly calls the `Close` function.
+ ~UseScratchRegisterScope();
+ // This function performs the cleaning-up work. It must succeed even if the
+ // scope has not been opened. It is safe to call multiple times.
+ void Close();
+
+
+ bool IsAvailable(const CPURegister& reg) const;
+
+
+ // Take a register from the appropriate temps list. It will be returned
+ // automatically when the scope ends.
+ Register AcquireW() { return AcquireNextAvailable(available_).W(); }
+ Register AcquireX() { return AcquireNextAvailable(available_).X(); }
+ VRegister AcquireS() { return AcquireNextAvailable(availablefp_).S(); }
+ VRegister AcquireD() { return AcquireNextAvailable(availablefp_).D(); }
+ VRegister AcquireQ() { return AcquireNextAvailable(availablefp_).Q(); }
+
+
+ Register AcquireSameSizeAs(const Register& reg);
+ VRegister AcquireSameSizeAs(const VRegister& reg);
+
+
+ // Explicitly release an acquired (or excluded) register, putting it back in
+ // the appropriate temps list.
+ void Release(const CPURegister& reg);
+
+
+ // Make the specified registers available as scratch registers for the
+ // duration of this scope.
+ void Include(const CPURegList& list);
+ void Include(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Include(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+
+
+ // Make sure that the specified registers are not available in this scope.
+ // This can be used to prevent helper functions from using sensitive
+ // registers, for example.
+ void Exclude(const CPURegList& list);
+ void Exclude(const Register& reg1,
+ const Register& reg2 = NoReg,
+ const Register& reg3 = NoReg,
+ const Register& reg4 = NoReg);
+ void Exclude(const VRegister& reg1,
+ const VRegister& reg2 = NoVReg,
+ const VRegister& reg3 = NoVReg,
+ const VRegister& reg4 = NoVReg);
+ void Exclude(const CPURegister& reg1,
+ const CPURegister& reg2 = NoCPUReg,
+ const CPURegister& reg3 = NoCPUReg,
+ const CPURegister& reg4 = NoCPUReg);
+
+
+ // Prevent any scratch registers from being used in this scope.
+ void ExcludeAll();
+
+
+ private:
+ static CPURegister AcquireNextAvailable(CPURegList* available);
+
+ static void ReleaseByCode(CPURegList* available, int code);
+
+ static void ReleaseByRegList(CPURegList* available,
+ RegList regs);
+
+ static void IncludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ static void ExcludeByRegList(CPURegList* available,
+ RegList exclude);
+
+ // Available scratch registers.
+ CPURegList* available_; // kRegister
+ CPURegList* availablefp_; // kVRegister
+
+ // The state of the available lists at the start of this scope.
+ RegList old_available_; // kRegister
+ RegList old_availablefp_; // kVRegister
+#ifdef DEBUG
+ bool initialised_;
+#endif
+
+ // Disallow copy constructor and operator=.
+ UseScratchRegisterScope(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+ void operator=(const UseScratchRegisterScope&) {
+ VIXL_UNREACHABLE();
+ }
+};
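+
+// A usage sketch (illustrative only; `masm`, `base` and `offset` are assumed
+// to be in scope): borrowing a 64-bit scratch register for a helper.
+//
+//   UseScratchRegisterScope temps(&masm);
+//   const Register scratch = temps.AcquireX();
+//   masm.Add(scratch, base, Operand(offset));
+//   // `scratch` returns to TmpList() when `temps` goes out of scope.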
+
+
+} // namespace vixl
+
+#endif // VIXL_A64_MACRO_ASSEMBLER_A64_H_
diff --git a/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
new file mode 100644
index 0000000000..b9189cc23b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozAssembler-vixl.cpp
@@ -0,0 +1,610 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/Label.h"
+
+namespace vixl {
+
+using LabelDoc = js::jit::DisassemblerSpew::LabelDoc;
+
+// Assembler
+void Assembler::FinalizeCode() {
+#ifdef DEBUG
+ finalized_ = true;
+#endif
+}
+
+// Unbound Label Representation.
+//
+// We can have multiple branches using the same label before it is bound.
+// Assembler::bind() must then be able to enumerate all the branches and patch
+// them to target the final label location.
+//
+// When a Label is unbound with uses, its offset is pointing to the tip of a
+// linked list of uses. The uses can be branches or adr/adrp instructions. In
+// the case of branches, the next member in the linked list is simply encoded
+// as the branch target. For adr/adrp, the relative pc offset is encoded in the
+// immediate field as a signed instruction offset.
+//
+// In both cases, the end of the list is encoded as a 0 pc offset, i.e. the
+// tail is pointing to itself.
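+//
+// For example (illustrative offsets): with unbound uses at buffer offsets
+// 0x00, 0x10 and 0x20, the label's offset points at 0x20; the use at 0x20
+// encodes an offset back to 0x10, the use at 0x10 encodes an offset back to
+// 0x00, and the use at 0x00 encodes 0, terminating the list.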
+
+static const ptrdiff_t kEndOfLabelUseList = 0;
+
+BufferOffset
+MozBaseAssembler::NextLink(BufferOffset cur)
+{
+ Instruction* link = getInstructionAt(cur);
+ // Raw encoded offset.
+ ptrdiff_t offset = link->ImmPCRawOffset();
+ // End of the list is encoded as 0.
+ if (offset == kEndOfLabelUseList)
+ return BufferOffset();
+ // The encoded offset is the number of instructions to move.
+ return BufferOffset(cur.getOffset() + offset * kInstructionSize);
+}
+
+static ptrdiff_t
+EncodeOffset(BufferOffset cur, BufferOffset next)
+{
+ MOZ_ASSERT(next.assigned() && cur.assigned());
+ ptrdiff_t offset = next.getOffset() - cur.getOffset();
+ MOZ_ASSERT(offset % kInstructionSize == 0);
+ return offset / kInstructionSize;
+}
+
+void
+MozBaseAssembler::SetNextLink(BufferOffset cur, BufferOffset next)
+{
+ Instruction* link = getInstructionAt(cur);
+ link->SetImmPCRawOffset(EncodeOffset(cur, next));
+}
+
+// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+//
+// If the label is bound, returns the offset as a multiple of 1 << elementShift.
+// Otherwise, links the instruction to the label and returns the raw offset to
+// encode. (This will be an instruction count.)
+//
+// The offset is calculated by aligning the PC and label addresses down to a
+// multiple of 1 << elementShift, then calculating the (scaled) offset between
+// them. This matches the semantics of adrp, for example. (Assuming that the
+// assembler buffer is page-aligned, which it probably isn't.)
+//
+// For an unbound label, the returned offset will be encodable in the provided
+// branch range. If the label is already bound, the caller is expected to make
+// sure that it is in range, and emit the necessary branch instructions if it
+// isn't.
+//
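+// A worked example (illustrative numbers): for a page-offset user such as
+// adrp, elementShift is kPageSizeLog2 (12), so a use at byte offset 0x1f50
+// targeting a bound label at byte offset 0x3008 yields
+// (0x3008 >> 12) - (0x1f50 >> 12) = 3 - 1 = 2 pages.
+//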
+ptrdiff_t
+MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementShift, Label* label)
+{
+ if (armbuffer_.oom())
+ return kEndOfLabelUseList;
+
+ if (label->bound()) {
+ // The label is bound: all uses are already linked.
+ ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift);
+ ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift);
+ return label_offset - branch_offset;
+ }
+
+ // Keep track of short-range branches targeting unbound labels. We may need
+ // to insert veneers in PatchShortRangeBranchToVeneer() below.
+ if (branchRange < NumShortBranchRangeTypes) {
+ // This is the last possible branch target.
+ BufferOffset deadline(branch.getOffset() +
+ Instruction::ImmBranchMaxForwardOffset(branchRange));
+ armbuffer_.registerBranchDeadline(branchRange, deadline);
+ }
+
+ // The label is unbound and previously unused: Store the offset in the label
+ // itself for patching by bind().
+ if (!label->used()) {
+ label->use(branch.getOffset());
+ return kEndOfLabelUseList;
+ }
+
+ // The label is unbound and has multiple users. Create a linked list between
+ // the branches, and update the linked list head in the label struct. This is
+ // not always trivial since the branches in the linked list have limited
+ // ranges.
+
+ // What is the earliest buffer offset that would be reachable by the branch
+ // we're about to add?
+ ptrdiff_t earliestReachable =
+ branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange);
+
+ // If the existing instruction at the head of the list is within reach of the
+ // new branch, we can simply insert the new branch at the front of the list.
+ if (label->offset() >= earliestReachable) {
+ ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label));
+ label->use(branch.getOffset());
+ MOZ_ASSERT(offset != kEndOfLabelUseList);
+ return offset;
+ }
+
+ // The label already has a linked list of uses, but we can't reach the head
+ // of the list with the allowed branch range. Insert this branch at a
+ // different position in the list.
+ //
+ // Find an existing branch, exbr, such that:
+ //
+ // 1. The new branch can be reached by exbr, and either
+ // 2a. The new branch can reach exbr's target, or
+ // 2b. The exbr branch is at the end of the list.
+ //
+ // Then the new branch can be inserted after exbr in the linked list.
+ //
+ // We know that it is always possible to find an exbr branch satisfying these
+ // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All
+ // branches are guaranteed to either be able to reach the end of the
+ // assembler buffer, or they will be pointing to an unconditional branch that
+ // can.
+ //
+ // In particular, the end of the list is always a viable candidate, so we'll
+ // just get that.
+ BufferOffset next(label);
+ BufferOffset exbr;
+ do {
+ exbr = next;
+ next = NextLink(next);
+ } while (next.assigned());
+ SetNextLink(exbr, branch);
+
+ // This branch becomes the new end of the list.
+ return kEndOfLabelUseList;
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetByteOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, 0, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetInstructionOffsetTo(BufferOffset branch,
+ ImmBranchRangeType branchRange,
+ Label* label) {
+ return LinkAndGetOffsetTo(branch, branchRange, kInstructionSizeLog2, label);
+}
+
+ptrdiff_t MozBaseAssembler::LinkAndGetPageOffsetTo(BufferOffset branch, Label* label) {
+ return LinkAndGetOffsetTo(branch, UncondBranchRangeType, kPageSizeLog2, label);
+}
+
+BufferOffset Assembler::b(int imm26, const LabelDoc& doc) {
+ return EmitBranch(B | ImmUncondBranch(imm26), doc);
+}
+
+
+void Assembler::b(Instruction* at, int imm26) {
+ return EmitBranch(at, B | ImmUncondBranch(imm26));
+}
+
+
+BufferOffset Assembler::b(int imm19, Condition cond, const LabelDoc& doc) {
+ return EmitBranch(B_cond | ImmCondBranch(imm19) | cond, doc);
+}
+
+
+void Assembler::b(Instruction* at, int imm19, Condition cond) {
+ EmitBranch(at, B_cond | ImmCondBranch(imm19) | cond);
+}
+
+
+BufferOffset Assembler::b(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
+}
+
+
+BufferOffset Assembler::b(Label* label, Condition cond) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return b(LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), cond, doc);
+}
+
+void Assembler::br(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BR | Rn(xn));
+}
+
+
+void Assembler::blr(Instruction* at, const Register& xn) {
+ VIXL_ASSERT(xn.Is64Bits());
+ // No need for EmitBranch(): no immediate offset needs fixing.
+ Emit(at, BLR | Rn(xn));
+}
+
+
+void Assembler::bl(int imm26, const LabelDoc& doc) {
+ EmitBranch(BL | ImmUncondBranch(imm26), doc);
+}
+
+
+void Assembler::bl(Instruction* at, int imm26) {
+ EmitBranch(at, BL | ImmUncondBranch(imm26));
+}
+
+
+void Assembler::bl(Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return bl(LinkAndGetInstructionOffsetTo(nextInstrOffset(), UncondBranchRangeType, label), doc);
+}
+
+
+void Assembler::cbz(const Register& rt, int imm19, const LabelDoc& doc) {
+ EmitBranch(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt), doc);
+}
+
+
+void Assembler::cbz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return cbz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
+}
+
+
+void Assembler::cbnz(const Register& rt, int imm19, const LabelDoc& doc) {
+ EmitBranch(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt), doc);
+}
+
+
+void Assembler::cbnz(Instruction* at, const Register& rt, int imm19) {
+ EmitBranch(at, SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
+}
+
+
+void Assembler::cbnz(const Register& rt, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return cbnz(rt, LinkAndGetInstructionOffsetTo(nextInstrOffset(), CondBranchRangeType, label), doc);
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
+}
+
+
+void Assembler::tbz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, int imm14, const LabelDoc& doc) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt), doc);
+}
+
+
+void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
+ VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
+ EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
+}
+
+
+void Assembler::tbnz(const Register& rt, unsigned bit_pos, Label* label) {
+ // Encode the relative offset from the inserted branch to the label.
+ LabelDoc doc = refLabel(label);
+ return tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(nextInstrOffset(), TestBranchRangeType, label), doc);
+}
+
+
+void Assembler::adr(const Register& rd, int imm21, const LabelDoc& doc) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd), doc);
+}
+
+
+void Assembler::adr(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADR | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adr(const Register& rd, Label* label) {
+ // Encode the relative offset from the inserted adr to the label.
+ LabelDoc doc = refLabel(label);
+ return adr(rd, LinkAndGetByteOffsetTo(nextInstrOffset(), label), doc);
+}
+
+
+void Assembler::adrp(const Register& rd, int imm21, const LabelDoc& doc) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(ADRP | ImmPCRelAddress(imm21) | Rd(rd), doc);
+}
+
+
+void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
+ VIXL_ASSERT(rd.Is64Bits());
+ EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
+}
+
+
+void Assembler::adrp(const Register& rd, Label* label) {
+ VIXL_ASSERT(AllowPageOffsetDependentCode());
+ // Encode the relative offset from the inserted adr to the label.
+ LabelDoc doc = refLabel(label);
+ return adrp(rd, LinkAndGetPageOffsetTo(nextInstrOffset(), label), doc);
+}
+
+
+BufferOffset Assembler::ands(const Register& rd, const Register& rn, const Operand& operand) {
+ return Logical(rd, rn, operand, ANDS);
+}
+
+
+BufferOffset Assembler::tst(const Register& rn, const Operand& operand) {
+ return ands(AppropriateZeroRegFor(rn), rn, operand);
+}
+
+
+void Assembler::ldr(Instruction* at, const CPURegister& rt, int imm19) {
+ LoadLiteralOp op = LoadLiteralOpFor(rt);
+ Emit(at, op | ImmLLiteral(imm19) | Rt(rt));
+}
+
+
+BufferOffset Assembler::hint(SystemHint code) {
+ return Emit(HINT | ImmHint(code));
+}
+
+
+void Assembler::hint(Instruction* at, SystemHint code) {
+ Emit(at, HINT | ImmHint(code));
+}
+
+
+void Assembler::svc(Instruction* at, int code) {
+ VIXL_ASSERT(IsUint16(code));
+ Emit(at, SVC | ImmException(code));
+}
+
+
+void Assembler::nop(Instruction* at) {
+ hint(at, NOP);
+}
+
+
+void Assembler::csdb(Instruction* at) {
+ hint(at, CSDB);
+}
+
+
+BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
+ const Operand& operand, LogicalOp op)
+{
+ VIXL_ASSERT(rd.size() == rn.size());
+ if (operand.IsImmediate()) {
+ int64_t immediate = operand.immediate();
+ unsigned reg_size = rd.size();
+
+ VIXL_ASSERT(immediate != 0);
+ VIXL_ASSERT(immediate != -1);
+ VIXL_ASSERT(rd.Is64Bits() || IsUint32(immediate));
+
+ // If the operation is NOT, invert the operation and immediate.
+ if ((op & NOT) == NOT) {
+ op = static_cast<LogicalOp>(op & ~NOT);
+ immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
+ }
+
+ unsigned n, imm_s, imm_r;
+ if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
+ // Immediate can be encoded in the instruction.
+ return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
+ } else {
+ // This case is handled in the macro assembler.
+ VIXL_UNREACHABLE();
+ }
+ } else {
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(operand.reg().size() == rd.size());
+ Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
+ return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
+ }
+}
+
+
+BufferOffset Assembler::LogicalImmediate(const Register& rd, const Register& rn,
+ unsigned n, unsigned imm_s, unsigned imm_r, LogicalOp op)
+{
+ unsigned reg_size = rd.size();
+ Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
+ return Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
+ ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg | Rn(rn));
+}
+
+
+BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
+ const Operand& operand, FlagsUpdate S, Instr op)
+{
+ VIXL_ASSERT(operand.IsShiftedRegister());
+ VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && IsUint5(operand.shift_amount())));
+ return Emit(SF(rd) | op | Flags(S) |
+ ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
+ Rm(operand.reg()) | Rn(rn) | Rd(rd));
+}
+
+
+void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
+ // Store the js::jit::PoolEntry index into the instruction.
+ // finishPool() will walk over all literal load instructions
+ // and use PatchConstantPoolLoad() to patch to the final relative offset.
+ *((uint32_t*)load) |= Assembler::ImmLLiteral(index);
+}
+
+
+bool MozBaseAssembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
+ Instruction* load = reinterpret_cast<Instruction*>(loadAddr);
+
+ // The load currently contains the js::jit::PoolEntry's index,
+ // as written by InsertIndexIntoTag().
+ uint32_t index = load->ImmLLiteral();
+
+ // Each entry in the literal pool is uint32_t-sized,
+ // but literals may use multiple entries.
+ uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
+ Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);
+
+ load->SetImmLLiteral(source);
+ return false; // Nothing uses the return value.
+}
+
+void
+MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx,
+ BufferOffset deadline, BufferOffset veneer)
+{
+ // Reconstruct the position of the branch from (rangeIdx, deadline).
+ vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx);
+ BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange));
+ Instruction *branchInst = buffer->getInst(branch);
+ Instruction *veneerInst = buffer->getInst(veneer);
+
+ // Verify that the branch range matches what's encoded.
+ MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange);
+
+  // We want to insert the veneer after the branch in the linked list of
+  // instructions that use the same unbound label. The veneer should be an
+  // unconditional branch.
+ ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset();
+
+ // If offset is 0, this is the end of the linked list.
+ if (nextElemOffset != kEndOfLabelUseList) {
+ // Make the offset relative to veneer so it targets the same instruction
+ // as branchInst.
+ nextElemOffset *= kInstructionSize;
+ nextElemOffset += branch.getOffset() - veneer.getOffset();
+ nextElemOffset /= kInstructionSize;
+ }
+ Assembler::b(veneerInst, nextElemOffset);
+
+ // Now point branchInst at veneer. See also SetNextLink() above.
+ branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer));
+}
+
+struct PoolHeader {
+ uint32_t data;
+
+ struct Header {
+    // The size includes the pool header.
+    // The size is in units of Instructions (4 bytes), not bytes.
+ union {
+ struct {
+ uint32_t size : 15;
+
+ // "Natural" guards are part of the normal instruction stream,
+ // while "non-natural" guards are inserted for the sole purpose
+ // of skipping around a pool.
+ uint32_t isNatural : 1;
+ uint32_t ONES : 16;
+ };
+ uint32_t data;
+ };
+
+ Header(int size_, bool isNatural_)
+ : size(size_),
+ isNatural(isNatural_),
+ ONES(0xffff)
+ { }
+
+ Header(uint32_t data)
+ : data(data)
+ {
+ VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ VIXL_ASSERT(ONES == 0xffff);
+ }
+
+ uint32_t raw() const {
+ VIXL_STATIC_ASSERT(sizeof(Header) == sizeof(uint32_t));
+ return data;
+ }
+ };
+
+ PoolHeader(int size_, bool isNatural_)
+ : data(Header(size_, isNatural_).raw())
+ { }
+
+ uint32_t size() const {
+ Header tmp(data);
+ return tmp.size;
+ }
+
+ uint32_t isNatural() const {
+ Header tmp(data);
+ return tmp.isNatural;
+ }
+};
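+
+// For example (illustrative numbers): a natural pool carrying 24 bytes of
+// literal data gets size = (sizeof(PoolHeader) + 24) / kInstructionSize = 7
+// and isNatural = 1, so the header word is 0xffff8007.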
+
+
+void MozBaseAssembler::WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ static_assert(sizeof(PoolHeader) == 4);
+
+ // Get the total size of the pool.
+ const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize();
+ const uintptr_t totalPoolInstructions = totalPoolSize / kInstructionSize;
+
+ VIXL_ASSERT((totalPoolSize & 0x3) == 0);
+ VIXL_ASSERT(totalPoolInstructions < (1 << 15));
+
+ PoolHeader header(totalPoolInstructions, isNatural);
+ *(PoolHeader*)start = header;
+}
+
+
+void MozBaseAssembler::WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural) {
+ return;
+}
+
+
+void MozBaseAssembler::WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest) {
+ int byteOffset = dest.getOffset() - branch.getOffset();
+ VIXL_ASSERT(byteOffset % kInstructionSize == 0);
+
+ int instOffset = byteOffset >> kInstructionSizeLog2;
+ Assembler::b(inst, instOffset);
+}
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
new file mode 100644
index 0000000000..5d12f81bb1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozBaseAssembler-vixl.h
@@ -0,0 +1,356 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef jit_arm64_vixl_MozBaseAssembler_vixl_h
+#define jit_arm64_vixl_MozBaseAssembler_vixl_h
+
+
+#include "mozilla/Assertions.h" // MOZ_ASSERT
+#include "mozilla/Sprintf.h" // SprintfLiteral
+
+#include <stddef.h> // size_t
+#include <stdint.h> // uint8_t, uint32_t
+#include <string.h> // strstr
+
+#include "jit/arm64/vixl/Constants-vixl.h" // vixl::{HINT, NOP, ImmHint_offset}
+#include "jit/arm64/vixl/Globals-vixl.h" // VIXL_ASSERT
+#include "jit/arm64/vixl/Instructions-vixl.h" // vixl::{Instruction, NumShortBranchRangeTypes, Instr, ImmBranchRangeType}
+
+#include "jit/Label.h" // jit::Label
+#include "jit/shared/Assembler-shared.h" // jit::AssemblerShared
+#include "jit/shared/Disassembler-shared.h" // jit::DisassemblerSpew
+#include "jit/shared/IonAssemblerBuffer.h" // jit::BufferOffset
+#include "jit/shared/IonAssemblerBufferWithConstantPools.h" // jit::AssemblerBufferWithConstantPools
+
+namespace vixl {
+
+
+using js::jit::BufferOffset;
+using js::jit::DisassemblerSpew;
+using js::jit::Label;
+
+using LabelDoc = DisassemblerSpew::LabelDoc;
+using LiteralDoc = DisassemblerSpew::LiteralDoc;
+
+#ifdef JS_DISASM_ARM64
+void DisassembleInstruction(char* buffer, size_t bufsize, const Instruction* instr);
+#endif
+
+class MozBaseAssembler;
+typedef js::jit::AssemblerBufferWithConstantPools<1024, 4, Instruction, MozBaseAssembler,
+ NumShortBranchRangeTypes> ARMBuffer;
+
+// Base class for vixl::Assembler, for isolating Moz-specific changes to VIXL.
+class MozBaseAssembler : public js::jit::AssemblerShared {
+ // Buffer initialization constants.
+ static const unsigned BufferGuardSize = 1;
+ static const unsigned BufferHeaderSize = 1;
+ static const size_t BufferCodeAlignment = 8;
+ static const size_t BufferMaxPoolOffset = 1024;
+ static const unsigned BufferPCBias = 0;
+ static const uint32_t BufferAlignmentFillInstruction = HINT | (NOP << ImmHint_offset);
+ static const uint32_t BufferNopFillInstruction = HINT | (NOP << ImmHint_offset);
+ static const unsigned BufferNumDebugNopsToInsert = 0;
+
+#ifdef JS_DISASM_ARM64
+ static constexpr const char* const InstrIndent = " ";
+ static constexpr const char* const LabelIndent = " ";
+ static constexpr const char* const TargetIndent = " ";
+#endif
+
+ public:
+ MozBaseAssembler()
+ : armbuffer_(BufferGuardSize,
+ BufferHeaderSize,
+ BufferCodeAlignment,
+ BufferMaxPoolOffset,
+ BufferPCBias,
+ BufferAlignmentFillInstruction,
+ BufferNopFillInstruction,
+ BufferNumDebugNopsToInsert)
+ {
+#ifdef JS_DISASM_ARM64
+ spew_.setLabelIndent(LabelIndent);
+ spew_.setTargetIndent(TargetIndent);
+#endif
+  }
+ ~MozBaseAssembler()
+ {
+#ifdef JS_DISASM_ARM64
+ spew_.spewOrphans();
+#endif
+ }
+
+ public:
+ // Return the Instruction at a given byte offset.
+ Instruction* getInstructionAt(BufferOffset offset) {
+ return armbuffer_.getInst(offset);
+ }
+
+ // Return the byte offset of a bound label.
+ template <typename T>
+ inline T GetLabelByteOffset(const js::jit::Label* label) {
+ VIXL_ASSERT(label->bound());
+ static_assert(sizeof(T) >= sizeof(uint32_t));
+ return reinterpret_cast<T>(label->offset());
+ }
+
+ protected:
+ // Get the buffer offset of the next inserted instruction. This may flush
+ // constant pools.
+ BufferOffset nextInstrOffset() {
+ return armbuffer_.nextInstrOffset();
+ }
+
+ // Get the next usable buffer offset. Note that a constant pool may be placed
+ // here before the next instruction is emitted.
+ BufferOffset nextOffset() const {
+ return armbuffer_.nextOffset();
+ }
+
+ // Allocate memory in the buffer by forwarding to armbuffer_.
+ // Propagate OOM errors.
+ BufferOffset allocLiteralLoadEntry(size_t numInst, unsigned numPoolEntries,
+ uint8_t* inst, uint8_t* data,
+ const LiteralDoc& doc = LiteralDoc(),
+ ARMBuffer::PoolEntry* pe = nullptr)
+ {
+ MOZ_ASSERT(inst);
+ MOZ_ASSERT(numInst == 1); /* If not, then fix disassembly */
+ BufferOffset offset = armbuffer_.allocEntry(numInst, numPoolEntries, inst,
+ data, pe);
+ propagateOOM(offset.assigned());
+#ifdef JS_DISASM_ARM64
+ Instruction* instruction = armbuffer_.getInstOrNull(offset);
+ if (instruction)
+ spewLiteralLoad(offset,
+ reinterpret_cast<vixl::Instruction*>(instruction), doc);
+#endif
+ return offset;
+ }
+
+#ifdef JS_DISASM_ARM64
+ DisassemblerSpew spew_;
+
+ void spew(BufferOffset offs, const vixl::Instruction* instr) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer);
+ }
+
+ void spewBranch(BufferOffset offs,
+ const vixl::Instruction* instr, const LabelDoc& target) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+
+ char labelBuf[128];
+ labelBuf[0] = 0;
+
+ bool hasTarget = target.valid;
+ if (!hasTarget)
+ SprintfLiteral(labelBuf, "-> (link-time target)");
+
+ if (instr->IsImmBranch() && hasTarget) {
+      // The target information in the instruction is likely garbage, so remove it.
+      // The target label will be printed anyway if we have it.
+      //
+      // The format of the instruction disassembly is /.*#.*/. Strip the '#' and
+      // everything after it.
+ size_t i;
+ const size_t BUFLEN = sizeof(buffer)-1;
+ for ( i=0 ; i < BUFLEN && buffer[i] && buffer[i] != '#' ; i++ )
+ ;
+ buffer[i] = 0;
+
+ SprintfLiteral(labelBuf, "-> %d%s", target.doc, !target.bound ? "f" : "");
+ hasTarget = false;
+ }
+
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s%s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer, labelBuf);
+
+ if (hasTarget)
+ spew_.spewRef(target);
+ }
+
+ void spewLiteralLoad(BufferOffset offs,
+ const vixl::Instruction* instr, const LiteralDoc& doc) {
+ if (spew_.isDisabled() || !instr)
+ return;
+
+ char buffer[2048];
+ DisassembleInstruction(buffer, sizeof(buffer), instr);
+
+ char litbuf[2048];
+ spew_.formatLiteral(doc, litbuf, sizeof(litbuf));
+
+ // The instruction will have the form /^.*pc\+0/ followed by junk that we
+ // don't need; try to strip it.
+
+ char *probe = strstr(buffer, "pc+0");
+ if (probe)
+ *(probe + 4) = 0;
+ spew_.spew("%06" PRIx32 " %08" PRIx32 "%s%s ; .const %s",
+ (uint32_t)offs.getOffset(),
+ instr->InstructionBits(), InstrIndent, buffer, litbuf);
+ }
+
+ LabelDoc refLabel(Label* label) {
+ if (spew_.isDisabled())
+ return LabelDoc();
+
+ return spew_.refLabel(label);
+ }
+#else
+ LabelDoc refLabel(js::jit::Label*) {
+ return LabelDoc();
+ }
+#endif
+
+ // Emit the instruction, returning its offset.
+ BufferOffset Emit(Instr instruction, bool isBranch = false) {
+ static_assert(sizeof(instruction) == kInstructionSize);
+ // TODO: isBranch is obsolete and should be removed.
+ (void)isBranch;
+ MOZ_ASSERT(hasCreator());
+ BufferOffset offs = armbuffer_.putInt(*(uint32_t*)(&instruction));
+#ifdef JS_DISASM_ARM64
+ if (!isBranch)
+ spew(offs, armbuffer_.getInstOrNull(offs));
+#endif
+ return offs;
+ }
+
+ BufferOffset EmitBranch(Instr instruction, const LabelDoc& doc) {
+ BufferOffset offs = Emit(instruction, true);
+#ifdef JS_DISASM_ARM64
+ spewBranch(offs, armbuffer_.getInstOrNull(offs), doc);
+#endif
+ return offs;
+ }
+
+ public:
+ // Emit the instruction at |at|.
+ static void Emit(Instruction* at, Instr instruction) {
+ static_assert(sizeof(instruction) == kInstructionSize);
+ memcpy(at, &instruction, sizeof(instruction));
+ }
+
+ static void EmitBranch(Instruction* at, Instr instruction) {
+ // TODO: Assert that the buffer already has the instruction marked as a branch.
+ Emit(at, instruction);
+ }
+
+ // Emit data inline in the instruction stream.
+ BufferOffset EmitData(void const * data, unsigned size) {
+ VIXL_ASSERT(size % 4 == 0);
+ MOZ_ASSERT(hasCreator());
+ return armbuffer_.allocEntry(size / sizeof(uint32_t), 0, (uint8_t*)(data), nullptr);
+ }
+
+ public:
+ // Size of the code generated in bytes, including pools.
+ size_t SizeOfCodeGenerated() const {
+ return armbuffer_.size();
+ }
+
+ // Move the pool into the instruction stream.
+ void flushBuffer() {
+ armbuffer_.flushPool();
+ }
+
+ // Inhibit pool flushing for the given number of instructions.
+ // Generating more than |maxInst| instructions in a no-pool region
+ // triggers an assertion within the ARMBuffer.
+ // Does not nest.
+ void enterNoPool(size_t maxInst) {
+ armbuffer_.enterNoPool(maxInst);
+ }
+
+ // Marks the end of a no-pool region.
+ void leaveNoPool() {
+ armbuffer_.leaveNoPool();
+ }
+
+ void enterNoNops() {
+ armbuffer_.enterNoNops();
+ }
+ void leaveNoNops() {
+ armbuffer_.leaveNoNops();
+ }
+
+ public:
+ // Static interface used by IonAssemblerBufferWithConstantPools.
+ static void InsertIndexIntoTag(uint8_t* load, uint32_t index);
+ static bool PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr);
+ static void PatchShortRangeBranchToVeneer(ARMBuffer*, unsigned rangeIdx, BufferOffset deadline,
+ BufferOffset veneer);
+ static uint32_t PlaceConstantPoolBarrier(int offset);
+
+ static void WritePoolHeader(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolFooter(uint8_t* start, js::jit::Pool* p, bool isNatural);
+ static void WritePoolGuard(BufferOffset branch, Instruction* inst, BufferOffset dest);
+
+ protected:
+ // Functions for managing Labels and linked lists of Label uses.
+
+ // Get the next Label user in the linked list of Label uses.
+ // Return an unassigned BufferOffset when the end of the list is reached.
+ BufferOffset NextLink(BufferOffset cur);
+
+ // Patch the instruction at cur to link to the instruction at next.
+ void SetNextLink(BufferOffset cur, BufferOffset next);
+
+ // Link the current (not-yet-emitted) instruction to the specified label,
+ // then return a raw offset to be encoded in the instruction.
+ ptrdiff_t LinkAndGetByteOffsetTo(BufferOffset branch, js::jit::Label* label);
+ ptrdiff_t LinkAndGetInstructionOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ js::jit::Label* label);
+ ptrdiff_t LinkAndGetPageOffsetTo(BufferOffset branch, js::jit::Label* label);
+
+ // A common implementation for the LinkAndGet<Type>OffsetTo helpers.
+ ptrdiff_t LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
+ unsigned elementSizeBits, js::jit::Label* label);
+
+ protected:
+ // The buffer into which code and relocation info are generated.
+ ARMBuffer armbuffer_;
+};
+
+
+} // namespace vixl
+
+
+#endif // jit_arm64_vixl_MozBaseAssembler_vixl_h
+
diff --git a/js/src/jit/arm64/vixl/MozCachingDecoder.h b/js/src/jit/arm64/vixl/MozCachingDecoder.h
new file mode 100644
index 0000000000..5b4cfc17d5
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozCachingDecoder.h
@@ -0,0 +1,179 @@
+#ifndef VIXL_A64_MOZ_CACHING_DECODER_A64_H_
+#define VIXL_A64_MOZ_CACHING_DECODER_A64_H_
+
+#include "mozilla/HashTable.h"
+
+#include "jit/arm64/vixl/Decoder-vixl.h"
+#include "js/AllocPolicy.h"
+
+#ifdef DEBUG
+#define JS_CACHE_SIMULATOR_ARM64 1
+#endif
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+namespace vixl {
+
+// This enumeration lists the different kinds of instructions which can be
+// decoded. These kinds correspond to the set of visitors defined by the
+// default Decoder.
+enum class InstDecodedKind : uint8_t {
+ NotDecodedYet,
+#define DECLARE(E) E,
+ VISITOR_LIST(DECLARE)
+#undef DECLARE
+};
+
+// A SinglePageDecodeCache is used to store the decoded kind of all instructions
+// in an executable page of code. Each time an instruction is decoded, its
+// decoded kind is recorded in this structure. The previous instruction value is
+// also recorded in this structure when using a debug build.
+//
+// The next time the same offset is visited, the instruction will be decoded
+// using the previously recorded decode kind. In debug builds, the instruction
+// bits are also compared against the previously recorded bits to check for
+// potential missing cache invalidations.
+//
+// This structure stores the equivalent of a single page of code, rather than
+// one hash-table entry per instruction, to get better memory locality when
+// using the simulator. A hash-table is still used by the CachingDecoder to map
+// page start addresses to these SinglePageDecodeCaches.
+class SinglePageDecodeCache {
+ public:
+ static const uintptr_t PageSize = 1 << 12;
+ static const uintptr_t PageMask = PageSize - 1;
+ static const uintptr_t InstSize = vixl::kInstructionSize;
+ static const uintptr_t InstMask = InstSize - 1;
+ static const uintptr_t InstPerPage = PageSize / InstSize;
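+  // With 4 KiB pages and 4-byte instructions, each cache covers 1024
+  // instructions.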
+
+ SinglePageDecodeCache(const Instruction* inst)
+ : pageStart_(PageStart(inst))
+ {
+ memset(&decodeCache_, int(InstDecodedKind::NotDecodedYet), sizeof(decodeCache_));
+ }
+ // Compute the start address of the page which contains this instruction.
+ static uintptr_t PageStart(const Instruction* inst) {
+ return uintptr_t(inst) & ~PageMask;
+ }
+ // Returns whether the instruction decoded kind is stored in this
+ // SinglePageDecodeCache.
+ bool contains(const Instruction* inst) {
+ return pageStart_ == PageStart(inst);
+ }
+ void clearDecode(const Instruction* inst) {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ decodeCache_[offset] = InstDecodedKind::NotDecodedYet;
+ }
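+  // Return the slot in which the decoded kind for |inst| will be recorded,
+  // snapshotting the current instruction bits so that decode() can later
+  // assert that the code was not modified without a cache flush.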
+ InstDecodedKind* decodePtr(const Instruction* inst) {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ uint32_t instValue = *reinterpret_cast<const uint32_t*>(inst);
+ instCache_[offset] = instValue;
+ return &decodeCache_[offset];
+ }
+ InstDecodedKind decode(const Instruction* inst) const {
+ uintptr_t offset = (uintptr_t(inst) & PageMask) / InstSize;
+ InstDecodedKind val = decodeCache_[offset];
+ uint32_t instValue = *reinterpret_cast<const uint32_t*>(inst);
+ MOZ_ASSERT_IF(val != InstDecodedKind::NotDecodedYet,
+ instCache_[offset] == instValue);
+ return val;
+ }
+
+ private:
+ // Record the address at which the corresponding code page starts.
+ const uintptr_t pageStart_;
+
+ // Cache what instruction got decoded previously, in order to assert if we see
+ // any stale instructions after.
+ uint32_t instCache_[InstPerPage];
+
+ // Cache the decoding of the instruction such that we can skip the decoding
+ // part.
+ InstDecodedKind decodeCache_[InstPerPage];
+};
+
+// A DecoderVisitor which will record which visitor function should be called
+// the next time we want to decode the same instruction.
+class CachingDecoderVisitor : public DecoderVisitor {
+ public:
+ CachingDecoderVisitor() = default;
+ virtual ~CachingDecoderVisitor() {}
+
+#define DECLARE(A) virtual void Visit##A(const Instruction* instr) { \
+ if (last_) { \
+ MOZ_ASSERT(*last_ == InstDecodedKind::NotDecodedYet); \
+ *last_ = InstDecodedKind::A; \
+ last_ = nullptr; \
+ } \
+ };
+
+ VISITOR_LIST(DECLARE)
+#undef DECLARE
+
+ void setDecodePtr(InstDecodedKind* ptr) {
+ last_ = ptr;
+ }
+
+ private:
+ InstDecodedKind* last_;
+};
+
+// The caching decoder extends the default vixl Decoder class by providing its
+// own Decode function.
+//
+// This Decode function checks whether the instruction given as argument has
+// already been decoded since it was last invalidated. If it has not, the value
+// of the instruction is recorded along with its decoded kind. Otherwise, the
+// value of the instruction is checked against the previously recorded value,
+// and the recorded kind is used to skip the decoding visitors and resume
+// execution of the instruction directly.
+//
+// The caching decoder stores the equivalent of a page of executable code per
+// hash-table entry. Each SinglePageDecodeCache stores an array of decoded
+// kinds as well as the value of each previously decoded instruction.
+//
+// When testing whether an instruction was decoded before, we first check
+// whether its address is covered by the last SinglePageDecodeCache used. If it
+// is not, the hash-table entry is looked up (and created if necessary), and the
+// last SinglePageDecodeCache is updated. After that, the last
+// SinglePageDecodeCache necessarily contains the decoded kind of the
+// instruction given as argument.
+//
+// The caching decoder adds an extra function for flushing the cache, which is
+// in charge of clearing the decoded kinds for the range of addresses given as
+// argument. This is called indirectly by CPU::EnsureIAndDCacheCoherency.
+class CachingDecoder : public Decoder {
+ using ICacheMap = mozilla::HashMap<uintptr_t, SinglePageDecodeCache*>;
+ public:
+ CachingDecoder()
+ : lastPage_(nullptr)
+ {
+ PrependVisitor(&cachingDecoder_);
+ }
+ ~CachingDecoder() {
+ RemoveVisitor(&cachingDecoder_);
+ }
+
+ void Decode(const Instruction* instr);
+ void Decode(Instruction* instr) {
+ Decode(const_cast<const Instruction*>(instr));
+ }
+
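+  // Clear the cached decode kinds for the address range [start, start + size),
+  // called indirectly from CPU::EnsureIAndDCacheCoherency.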
+ void FlushICache(void* start, size_t size);
+
+ private:
+ // Record the type of the decoded instruction, to avoid decoding it a second
+ // time the next time we execute it.
+ CachingDecoderVisitor cachingDecoder_;
+
+ // Store the mapping of Instruction pointer to the corresponding
+ // SinglePageDecodeCache.
+ ICacheMap iCache_;
+
+ // Record the last SinglePageDecodeCache seen, such that we can quickly access
+ // it for the next instruction.
+ SinglePageDecodeCache* lastPage_;
+};
+
+}
+#endif // !JS_CACHE_SIMULATOR_ARM64
+#endif // !VIXL_A64_MOZ_CACHING_DECODER_A64_H_
diff --git a/js/src/jit/arm64/vixl/MozCpu-vixl.cpp b/js/src/jit/arm64/vixl/MozCpu-vixl.cpp
new file mode 100644
index 0000000000..909cc590ae
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozCpu-vixl.cpp
@@ -0,0 +1,226 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Cpu-vixl.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "util/WindowsWrapper.h"
+
+#if defined(XP_DARWIN)
+# include <libkern/OSCacheControl.h>
+#endif
+
+namespace vixl {
+
+// Currently computes I and D cache line size.
+void CPU::SetUp() {
+ uint32_t cache_type_register = GetCacheType();
+
+  // The cache type register holds information about the caches, including the
+  // I and D cache line sizes.
+ static const int kDCacheLineSizeShift = 16;
+ static const int kICacheLineSizeShift = 0;
+ static const uint32_t kDCacheLineSizeMask = 0xf << kDCacheLineSizeShift;
+ static const uint32_t kICacheLineSizeMask = 0xf << kICacheLineSizeShift;
+
+ // The cache type register holds the size of the I and D caches in words as
+ // a power of two.
+ uint32_t dcache_line_size_power_of_two =
+ (cache_type_register & kDCacheLineSizeMask) >> kDCacheLineSizeShift;
+ uint32_t icache_line_size_power_of_two =
+ (cache_type_register & kICacheLineSizeMask) >> kICacheLineSizeShift;
+
+ dcache_line_size_ = 4 << dcache_line_size_power_of_two;
+ icache_line_size_ = 4 << icache_line_size_power_of_two;
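+  // For example, a register field value of 4 yields 4 << 4 = 64-byte lines.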
+
+  // Bug 1521158 suggests that CPUs with heterogeneous cache line sizes can
+  // cause issues: if we invalidate only every 128 bytes, we would miss half of
+  // the cache lines on little cores that use a smaller stride such as 64
+  // bytes. To be conservative, we reduce the stride to 32 bytes, which should
+  // be smaller than any known cache line.
+ const uint32_t conservative_line_size = 32;
+ dcache_line_size_ = std::min(dcache_line_size_, conservative_line_size);
+ icache_line_size_ = std::min(icache_line_size_, conservative_line_size);
+}
+
+
+uint32_t CPU::GetCacheType() {
+#if defined(__aarch64__) && (defined(__linux__) || defined(__android__))
+ uint64_t cache_type_register;
+ // Copy the content of the cache type register to a core register.
+ __asm__ __volatile__ ("mrs %[ctr], ctr_el0" // NOLINT
+ : [ctr] "=r" (cache_type_register));
+ VIXL_ASSERT(IsUint32(cache_type_register));
+ return static_cast<uint32_t>(cache_type_register);
+#else
+  // This will lead to minimum-sized cache lines, which is fine since neither
+  // EnsureIAndDCacheCoherency nor the simulator needs this information on such
+  // targets.
+ return 0;
+#endif
+}
+
+void CPU::EnsureIAndDCacheCoherency(void* address, size_t length) {
+#if defined(JS_SIMULATOR_ARM64) && defined(JS_CACHE_SIMULATOR_ARM64)
+  // This code attempts to emulate what the native assembly sequence below
+  // does: it broadcasts to all cores that some cache lines have to be
+  // invalidated, but invalidates them only on the current core.
+  //
+  // This is done by recording the range to be flushed for all simulators, and
+  // then, if there is a simulator associated with the current thread, applying
+  // all pending flushes as an "isb" instruction would do.
+ //
+ // As we have no control over the CPU cores used by the code generator and the
+ // execution threads, this code assumes that each thread runs on its own core.
+ //
+ // See Bug 1529933 for more detailed explanation of this issue.
+ using js::jit::SimulatorProcess;
+ js::jit::AutoLockSimulatorCache alsc;
+ if (length > 0) {
+ SimulatorProcess::recordICacheFlush(address, length);
+ }
+ Simulator* sim = vixl::Simulator::Current();
+ if (sim) {
+ sim->FlushICache();
+ }
+#elif defined(_MSC_VER) && defined(_M_ARM64)
+ FlushInstructionCache(GetCurrentProcess(), address, length);
+#elif defined(XP_DARWIN)
+ sys_icache_invalidate(address, length);
+#elif defined(__aarch64__) && (defined(__linux__) || defined(__android__))
+ // Implement the cache synchronisation for all targets where AArch64 is the
+  // host, even if we're building the simulator for an AArch64 host. This
+ // allows for cases where the user wants to simulate code as well as run it
+ // natively.
+
+ if (length == 0) {
+ return;
+ }
+
+ // The code below assumes user space cache operations are allowed.
+
+ // Work out the line sizes for each cache, and use them to determine the
+ // start addresses.
+ uintptr_t start = reinterpret_cast<uintptr_t>(address);
+ uintptr_t dsize = static_cast<uintptr_t>(dcache_line_size_);
+ uintptr_t isize = static_cast<uintptr_t>(icache_line_size_);
+ uintptr_t dline = start & ~(dsize - 1);
+ uintptr_t iline = start & ~(isize - 1);
+
+ // Cache line sizes are always a power of 2.
+ VIXL_ASSERT(IsPowerOf2(dsize));
+ VIXL_ASSERT(IsPowerOf2(isize));
+ uintptr_t end = start + length;
+
+ do {
+ __asm__ __volatile__ (
+ // Clean each line of the D cache containing the target data.
+ //
+ // dc : Data Cache maintenance
+ // c : Clean
+ // i : Invalidate
+ // va : by (Virtual) Address
+ // c : to the point of Coherency
+ // Original implementation used cvau, but changed to civac due to
+ // errata on Cortex-A53 819472, 826319, 827319 and 824069.
+ // See ARM DDI 0406B page B2-12 for more information.
+ //
+ " dc civac, %[dline]\n"
+ :
+ : [dline] "r" (dline)
+ // This code does not write to memory, but the "memory" dependency
+ // prevents GCC from reordering the code.
+ : "memory");
+ dline += dsize;
+ } while (dline < end);
+
+ __asm__ __volatile__ (
+ // Make sure that the data cache operations (above) complete before the
+ // instruction cache operations (below).
+ //
+ // dsb : Data Synchronisation Barrier
+ // ish : Inner SHareable domain
+ //
+ // The point of unification for an Inner Shareable shareability domain is
+ // the point by which the instruction and data caches of all the processors
+ // in that Inner Shareable shareability domain are guaranteed to see the
+ // same copy of a memory location. See ARM DDI 0406B page B2-12 for more
+ // information.
+ " dsb ish\n"
+ : : : "memory");
+
+ do {
+ __asm__ __volatile__ (
+ // Invalidate each line of the I cache containing the target data.
+ //
+ // ic : Instruction Cache maintenance
+ // i : Invalidate
+ // va : by Address
+ // u : to the point of Unification
+ " ic ivau, %[iline]\n"
+ :
+ : [iline] "r" (iline)
+ : "memory");
+ iline += isize;
+ } while (iline < end);
+
+ __asm__ __volatile__(
+ // Make sure that the instruction cache operations (above) take effect
+ // before the isb (below).
+ " dsb ish\n"
+
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the new data.
+ // isb : Instruction Synchronisation Barrier
+ " isb\n"
+ :
+ :
+ : "memory");
+#else
+ // If the host isn't AArch64, we must be using the simulator, so this function
+ // doesn't have to do anything.
+ USE(address, length);
+#endif
+}
+
+void CPU::FlushExecutionContext() {
+#if defined(JS_SIMULATOR_ARM64) && defined(JS_CACHE_SIMULATOR_ARM64)
+ // Performing an 'isb' will ensure the current core instruction pipeline is
+ // synchronized with an icache flush executed by another core.
+ using js::jit::SimulatorProcess;
+ js::jit::AutoLockSimulatorCache alsc;
+ Simulator* sim = vixl::Simulator::Current();
+ if (sim) {
+ sim->FlushICache();
+ }
+#elif defined(__aarch64__)
+ // Ensure that any instructions already in the pipeline are discarded and
+ // reloaded from the icache.
+ __asm__ __volatile__("isb\n" : : : "memory");
+#endif
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
new file mode 100644
index 0000000000..398f864493
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozInstructions-vixl.cpp
@@ -0,0 +1,211 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/Architecture-arm64.h"
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+
+namespace vixl {
+
+bool Instruction::IsUncondB() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | B);
+}
+
+
+bool Instruction::IsCondB() const {
+ return Mask(ConditionalBranchMask) == (ConditionalBranchFixed | B_cond);
+}
+
+
+bool Instruction::IsBL() const {
+ return Mask(UnconditionalBranchMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsBR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BR);
+}
+
+
+bool Instruction::IsBLR() const {
+ return Mask(UnconditionalBranchToRegisterMask) == (UnconditionalBranchToRegisterFixed | BLR);
+}
+
+
+bool Instruction::IsTBZ() const {
+ return Mask(TestBranchMask) == TBZ;
+}
+
+
+bool Instruction::IsTBNZ() const {
+ return Mask(TestBranchMask) == TBNZ;
+}
+
+
+bool Instruction::IsCBZ() const {
+ return Mask(CompareBranchMask) == CBZ_w || Mask(CompareBranchMask) == CBZ_x;
+}
+
+
+bool Instruction::IsCBNZ() const {
+ return Mask(CompareBranchMask) == CBNZ_w || Mask(CompareBranchMask) == CBNZ_x;
+}
+
+
+bool Instruction::IsLDR() const {
+ return Mask(LoadLiteralMask) == LDR_x_lit;
+}
+
+
+bool Instruction::IsNOP() const {
+ return Mask(SystemHintMask) == HINT && ImmHint() == NOP;
+}
+
+
+bool Instruction::IsCSDB() const {
+ return Mask(SystemHintMask) == HINT && ImmHint() == CSDB;
+}
+
+
+bool Instruction::IsADR() const {
+ return Mask(PCRelAddressingMask) == ADR;
+}
+
+
+bool Instruction::IsADRP() const {
+ return Mask(PCRelAddressingMask) == ADRP;
+}
+
+
+bool Instruction::IsMovz() const {
+ return (Mask(MoveWideImmediateMask) == MOVZ_x) ||
+ (Mask(MoveWideImmediateMask) == MOVZ_w);
+}
+
+
+bool Instruction::IsMovk() const {
+ return (Mask(MoveWideImmediateMask) == MOVK_x) ||
+ (Mask(MoveWideImmediateMask) == MOVK_w);
+}
+
+bool Instruction::IsBranchLinkImm() const {
+ return Mask(UnconditionalBranchFMask) == (UnconditionalBranchFixed | BL);
+}
+
+
+bool Instruction::IsTargetReachable(const Instruction* target) const {
+ VIXL_ASSERT(((target - this) & 3) == 0);
+ int offset = (target - this) >> kInstructionSizeLog2;
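+  // The offset is counted in instructions; each branch type encodes a signed
+  // immediate of a different width (26 bits for B/BL, 19 bits for conditional
+  // and compare-and-branch, 14 bits for test-and-branch).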
+ switch (BranchType()) {
+ case CondBranchType:
+ return IsInt19(offset);
+ case UncondBranchType:
+ return IsInt26(offset);
+ case CompareBranchType:
+ return IsInt19(offset);
+ case TestBranchType:
+ return IsInt14(offset);
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+ptrdiff_t Instruction::ImmPCRawOffset() const {
+ ptrdiff_t offset;
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP.
+ offset = ImmPCRel();
+ } else if (BranchType() == UnknownBranchType) {
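+    // Not a branch or ADR/ADRP: assume this is a load-literal instruction.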
+ offset = ImmLLiteral();
+ } else {
+ offset = ImmBranch();
+ }
+ return offset;
+}
+
+void
+Instruction::SetImmPCRawOffset(ptrdiff_t offset)
+{
+ if (IsPCRelAddressing()) {
+ // ADR and ADRP. We're encoding a raw offset here.
+ // See also SetPCRelImmTarget().
+ Instr imm = vixl::Assembler::ImmPCRelAddress(offset);
+ SetInstructionBits(Mask(~ImmPCRel_mask) | imm);
+ } else {
+ SetBranchImmTarget(this + (offset << kInstructionSizeLog2));
+ }
+}
+
+// Is this a stack pointer synchronization instruction as inserted by
+// MacroAssembler::syncStackPtr()?
+bool
+Instruction::IsStackPtrSync() const
+{
+ // The stack pointer sync is a move to the stack pointer.
+ // This is encoded as 'add sp, Rs, #0'.
+ return IsAddSubImmediate() && Rd() == js::jit::Registers::sp && ImmAddSub() == 0;
+}
+
+// Skip over a constant pool at |this| if there is one.
+//
+// If |this| is pointing to the artificial guard branch around a constant pool,
+// return the instruction after the pool. Otherwise return |this| itself.
+//
+// This function does not skip constant pools with a natural guard branch. It
+// is assumed that anyone inspecting the instruction stream understands about
+// branches that were inserted naturally.
+const Instruction*
+Instruction::skipPool() const
+{
+ // Artificial pool guards can only be B (rather than BR), and they must be
+ // forward branches.
+ if (!IsUncondB() || ImmUncondBranch() <= 0)
+ return this;
+
+  // Check for a constant pool header, which has the high 16 bits set. See
+  // struct PoolHeader. Bit 15, when set, indicates a natural pool guard; it
+  // must be clear here, which indicates an artificial pool guard.
+ const Instruction *header = InstructionAtOffset(kInstructionSize);
+ if (header->Mask(0xffff8000) != 0xffff0000)
+ return this;
+
+ // OK, this is an artificial jump around a constant pool.
+ return ImmPCOffsetTarget();
+}
+
+
+void Instruction::SetBits32(int msb, int lsb, unsigned value) {
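+  // Replace bits [msb:lsb] of the instruction word with |value|, going through
+  // memcpy to avoid strict-aliasing issues.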
+ uint32_t me;
+ memcpy(&me, this, sizeof(me));
+ uint32_t new_mask = (1 << (msb+1)) - (1 << lsb);
+ uint32_t keep_mask = ~new_mask;
+ me = (me & keep_mask) | ((value << lsb) & new_mask);
+ memcpy(this, &me, sizeof(me));
+}
+
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
new file mode 100644
index 0000000000..b6ac3952c6
--- /dev/null
+++ b/js/src/jit/arm64/vixl/MozSimulator-vixl.cpp
@@ -0,0 +1,1239 @@
+// Copyright 2013, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "mozilla/DebugOnly.h"
+
+#include "jit/arm64/vixl/Debugger-vixl.h"
+#include "jit/arm64/vixl/MozCachingDecoder.h"
+#include "jit/arm64/vixl/Simulator-vixl.h"
+#include "jit/IonTypes.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "threading/LockGuard.h"
+#include "vm/JSContext.h"
+#include "vm/Runtime.h"
+
+js::jit::SimulatorProcess* js::jit::SimulatorProcess::singleton_ = nullptr;
+
+namespace vixl {
+
+using mozilla::DebugOnly;
+using js::jit::ABIFunctionType;
+using js::jit::JitActivation;
+using js::jit::SimulatorProcess;
+
+Simulator::Simulator(Decoder* decoder, FILE* stream)
+ : stream_(nullptr)
+ , print_disasm_(nullptr)
+ , instrumentation_(nullptr)
+ , stack_(nullptr)
+ , stack_limit_(nullptr)
+ , decoder_(nullptr)
+ , oom_(false)
+{
+ this->init(decoder, stream);
+
+ // If this environment variable is present, trace the executed instructions.
+ // (Very helpful for debugging code generation crashes.)
+ if (getenv("VIXL_TRACE")) {
+ set_trace_parameters(LOG_DISASM);
+ }
+}
+
+
+Simulator::~Simulator() {
+ js_free(stack_);
+ stack_ = nullptr;
+
+ // The decoder may outlive the simulator.
+ if (print_disasm_) {
+ decoder_->RemoveVisitor(print_disasm_);
+ js_delete(print_disasm_);
+ print_disasm_ = nullptr;
+ }
+
+ if (instrumentation_) {
+ decoder_->RemoveVisitor(instrumentation_);
+ js_delete(instrumentation_);
+ instrumentation_ = nullptr;
+ }
+}
+
+
+void Simulator::ResetState() {
+ // Reset the system registers.
+ nzcv_ = SimSystemRegister::DefaultValueFor(NZCV);
+ fpcr_ = SimSystemRegister::DefaultValueFor(FPCR);
+
+ // Reset registers to 0.
+ pc_ = nullptr;
+ pc_modified_ = false;
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ set_xreg(i, 0xbadbeef);
+ }
+ // Set FP registers to a value that is a NaN in both 32-bit and 64-bit FP.
+ uint64_t nan_bits = UINT64_C(0x7ff0dead7f8beef1);
+ VIXL_ASSERT(IsSignallingNaN(RawbitsToDouble(nan_bits & kDRegMask)));
+ VIXL_ASSERT(IsSignallingNaN(RawbitsToFloat(nan_bits & kSRegMask)));
+ for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
+ set_dreg_bits(i, nan_bits);
+ }
+ // Returning to address 0 exits the Simulator.
+ set_lr(kEndOfSimAddress);
+}
+
+
+void Simulator::init(Decoder* decoder, FILE* stream) {
+ // Ensure that shift operations act as the simulator expects.
+ VIXL_ASSERT((static_cast<int32_t>(-1) >> 1) == -1);
+ VIXL_ASSERT((static_cast<uint32_t>(-1) >> 1) == 0x7FFFFFFF);
+
+ instruction_stats_ = false;
+
+ // Set up the decoder.
+ decoder_ = decoder;
+ decoder_->AppendVisitor(this);
+
+ stream_ = stream;
+ print_disasm_ = js_new<PrintDisassembler>(stream_);
+ if (!print_disasm_) {
+ oom_ = true;
+ return;
+ }
+ set_coloured_trace(false);
+ trace_parameters_ = LOG_NONE;
+
+ ResetState();
+
+ // Allocate and set up the simulator stack.
+ stack_ = js_pod_malloc<byte>(stack_size_);
+ if (!stack_) {
+ oom_ = true;
+ return;
+ }
+ stack_limit_ = stack_ + stack_protection_size_;
+ // Configure the starting stack pointer.
+ // - Find the top of the stack.
+ byte * tos = stack_ + stack_size_;
+ // - There's a protection region at both ends of the stack.
+ tos -= stack_protection_size_;
+ // - The stack pointer must be 16-byte aligned.
+ tos = AlignDown(tos, 16);
+ set_sp(tos);
+
+ // Set the sample period to 10, as the VIXL examples and tests are short.
+ if (getenv("VIXL_STATS")) {
+ instrumentation_ = js_new<Instrument>("vixl_stats.csv", 10);
+ if (!instrumentation_) {
+ oom_ = true;
+ return;
+ }
+ }
+
+ // Print a warning about exclusive-access instructions, but only the first
+ // time they are encountered. This warning can be silenced using
+ // SilenceExclusiveAccessWarning().
+ print_exclusive_access_warning_ = true;
+}
+
+
+Simulator* Simulator::Current() {
+ JSContext* cx = js::TlsContext.get();
+ if (!cx) {
+ return nullptr;
+ }
+ JSRuntime* rt = cx->runtime();
+ if (!rt) {
+ return nullptr;
+ }
+ if (!js::CurrentThreadCanAccessRuntime(rt)) {
+ return nullptr;
+ }
+ return cx->simulator();
+}
+
+
+Simulator* Simulator::Create() {
+ Decoder *decoder = js_new<Decoder>();
+ if (!decoder)
+ return nullptr;
+
+ // FIXME: This just leaks the Decoder object for now, which is probably OK.
+ // FIXME: We should free it at some point.
+ // FIXME: Note that it can't be stored in the SimulatorRuntime due to lifetime conflicts.
+ js::UniquePtr<Simulator> sim;
+ if (getenv("USE_DEBUGGER") != nullptr) {
+ sim.reset(js_new<Debugger>(decoder, stdout));
+ } else {
+ sim.reset(js_new<Simulator>(decoder, stdout));
+ }
+
+ // Check if Simulator:init ran out of memory.
+ if (sim && sim->oom()) {
+ return nullptr;
+ }
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ // Register the simulator in the Simulator process to handle cache flushes
+ // across threads.
+ js::jit::AutoLockSimulatorCache alsc;
+ if (!SimulatorProcess::registerSimulator(sim.get())) {
+ return nullptr;
+ }
+#endif
+
+ return sim.release();
+}
+
+
+void Simulator::Destroy(Simulator* sim) {
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ if (sim) {
+ js::jit::AutoLockSimulatorCache alsc;
+ SimulatorProcess::unregisterSimulator(sim);
+ }
+#endif
+
+ js_delete(sim);
+}
+
+
+void Simulator::ExecuteInstruction() {
+ // The program counter should always be aligned.
+ VIXL_ASSERT(IsWordAligned(pc_));
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ if (pendingCacheRequests) {
+    // We are emulating here the behavior that a membarrier would have on real
+    // hardware; see CPU::EnsureIAndDCacheCoherency in MozCpu-vixl.cpp.
+    // The slight difference is that the simulator is not interrupted:
+    // instead, we effectively run the icache flush request before executing
+    // the next instruction, which is close enough and sufficient for our use
+    // case.
+ js::jit::AutoLockSimulatorCache alsc;
+ FlushICache();
+ }
+#endif
+ decoder_->Decode(pc_);
+ increment_pc();
+}
+
+
+uintptr_t Simulator::stackLimit() const {
+ return reinterpret_cast<uintptr_t>(stack_limit_);
+}
+
+
+uintptr_t* Simulator::addressOfStackLimit() {
+ return (uintptr_t*)&stack_limit_;
+}
+
+
+bool Simulator::overRecursed(uintptr_t newsp) const {
+ if (newsp == 0) {
+ newsp = get_sp();
+ }
+ return newsp <= stackLimit();
+}
+
+
+bool Simulator::overRecursedWithExtra(uint32_t extra) const {
+ uintptr_t newsp = get_sp() - extra;
+ return newsp <= stackLimit();
+}
+
+
+JS::ProfilingFrameIterator::RegisterState
+Simulator::registerState()
+{
+ JS::ProfilingFrameIterator::RegisterState state;
+ state.pc = (uint8_t*) get_pc();
+ state.fp = (uint8_t*) get_fp();
+ state.lr = (uint8_t*) get_lr();
+ state.sp = (uint8_t*) get_sp();
+ return state;
+}
+
+int64_t Simulator::call(uint8_t* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+
+ // First eight arguments passed in registers.
+ VIXL_ASSERT(argument_count <= 8);
+ // This code should use the type of the called function
+ // (with templates, like the callVM machinery), but since the
+  // number of called functions is minuscule, their types have been
+ // divined from the number of arguments.
+ if (argument_count == 8) {
+ // EnterJitData::jitcode.
+ set_xreg(0, va_arg(parameters, int64_t));
+ // EnterJitData::maxArgc.
+ set_xreg(1, va_arg(parameters, unsigned));
+ // EnterJitData::maxArgv.
+ set_xreg(2, va_arg(parameters, int64_t));
+ // EnterJitData::osrFrame.
+ set_xreg(3, va_arg(parameters, int64_t));
+ // EnterJitData::calleeToken.
+ set_xreg(4, va_arg(parameters, int64_t));
+ // EnterJitData::scopeChain.
+ set_xreg(5, va_arg(parameters, int64_t));
+ // EnterJitData::osrNumStackValues.
+ set_xreg(6, va_arg(parameters, unsigned));
+ // Address of EnterJitData::result.
+ set_xreg(7, va_arg(parameters, int64_t));
+ } else if (argument_count == 2) {
+ // EntryArg* args
+ set_xreg(0, va_arg(parameters, int64_t));
+ // uint8_t* GlobalData
+ set_xreg(1, va_arg(parameters, int64_t));
+ } else if (argument_count == 1) { // irregexp
+ // InputOutputData& data
+ set_xreg(0, va_arg(parameters, int64_t));
+ } else if (argument_count == 0) { // testsJit.cpp
+ // accept.
+ } else {
+ MOZ_CRASH("Unknown number of arguments");
+ }
+
+ va_end(parameters);
+
+ // Call must transition back to native code on exit.
+ VIXL_ASSERT(get_lr() == int64_t(kEndOfSimAddress));
+
+ // Execute the simulation.
+ DebugOnly<int64_t> entryStack = get_sp();
+ RunFrom((Instruction*)entry);
+ DebugOnly<int64_t> exitStack = get_sp();
+ VIXL_ASSERT(entryStack == exitStack);
+
+ int64_t result = xreg(0);
+ if (getenv("USE_DEBUGGER")) {
+ printf("LEAVE\n");
+ }
+ return result;
+}
+
+
+// When the generated code calls a VM function (masm.callWithABI) we need to
+// call that function instead of trying to execute it with the simulator
+// (because it's x64 code instead of AArch64 code). We do that by redirecting the VM
+// call to a svc (Supervisor Call) instruction that is handled by the
+// simulator. We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection
+{
+ friend class Simulator;
+
+ Redirection(void* nativeFunction, ABIFunctionType type)
+ : nativeFunction_(nativeFunction),
+ type_(type),
+ next_(nullptr)
+ {
+ next_ = SimulatorProcess::redirection();
+ SimulatorProcess::setRedirection(this);
+
+ Instruction* instr = (Instruction*)(&svcInstruction_);
+ vixl::Assembler::svc(instr, kCallRtRedirected);
+ }
+
+ public:
+ void* addressOfSvcInstruction() { return &svcInstruction_; }
+ void* nativeFunction() const { return nativeFunction_; }
+ ABIFunctionType type() const { return type_; }
+
+ static Redirection* Get(void* nativeFunction, ABIFunctionType type) {
+ js::jit::AutoLockSimulatorCache alsr;
+
+ // TODO: Store srt_ in the simulator for this assertion.
+ // VIXL_ASSERT_IF(pt->simulator(), pt->simulator()->srt_ == srt);
+
+ Redirection* current = SimulatorProcess::redirection();
+ for (; current != nullptr; current = current->next_) {
+ if (current->nativeFunction_ == nativeFunction) {
+ VIXL_ASSERT(current->type() == type);
+ return current;
+ }
+ }
+
+ // Note: we can't use js_new here because the constructor is private.
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ Redirection* redir = js_pod_malloc<Redirection>(1);
+ if (!redir)
+ oomUnsafe.crash("Simulator redirection");
+ new(redir) Redirection(nativeFunction, type);
+ return redir;
+ }
+
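+  // Recover the Redirection that embeds a given svc instruction by subtracting
+  // the field's offset within the object.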
+ static const Redirection* FromSvcInstruction(const Instruction* svcInstruction) {
+ const uint8_t* addrOfSvc = reinterpret_cast<const uint8_t*>(svcInstruction);
+ const uint8_t* addrOfRedirection = addrOfSvc - offsetof(Redirection, svcInstruction_);
+ return reinterpret_cast<const Redirection*>(addrOfRedirection);
+ }
+
+ private:
+ void* nativeFunction_;
+ uint32_t svcInstruction_;
+ ABIFunctionType type_;
+ Redirection* next_;
+};
+
+
+
+
+void* Simulator::RedirectNativeFunction(void* nativeFunction, ABIFunctionType type) {
+ Redirection* redirection = Redirection::Get(nativeFunction, type);
+ return redirection->addressOfSvcInstruction();
+}
+
+void Simulator::VisitException(const Instruction* instr) {
+ if (instr->InstructionBits() == UNDEFINED_INST_PATTERN) {
+ uint8_t* newPC;
+ if (js::wasm::HandleIllegalInstruction(registerState(), &newPC)) {
+ set_pc((Instruction*)newPC);
+ return;
+ }
+ DoUnreachable(instr);
+ }
+
+ switch (instr->Mask(ExceptionMask)) {
+ case BRK: {
+ int lowbit = ImmException_offset;
+ int highbit = ImmException_offset + ImmException_width - 1;
+ HostBreakpoint(instr->Bits(highbit, lowbit));
+ break;
+ }
+ case HLT:
+ switch (instr->ImmException()) {
+ case kTraceOpcode:
+ DoTrace(instr);
+ return;
+ case kLogOpcode:
+ DoLog(instr);
+ return;
+ case kPrintfOpcode:
+ DoPrintf(instr);
+ return;
+ default:
+ HostBreakpoint();
+ return;
+ }
+ case SVC:
+ // The SVC instruction is hijacked by the JIT as a pseudo-instruction
+ // causing the Simulator to execute host-native code for callWithABI.
+ switch (instr->ImmException()) {
+ case kCallRtRedirected:
+ VisitCallRedirection(instr);
+ return;
+ case kMarkStackPointer: {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!spStack_.append(get_sp()))
+ oomUnsafe.crash("tracking stack for ARM64 simulator");
+ return;
+ }
+ case kCheckStackPointer: {
+ DebugOnly<int64_t> current = get_sp();
+ DebugOnly<int64_t> expected = spStack_.popCopy();
+ VIXL_ASSERT(current == expected);
+ return;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::setGPR32Result(int32_t result) {
+ set_wreg(0, result);
+}
+
+
+void Simulator::setGPR64Result(int64_t result) {
+ set_xreg(0, result);
+}
+
+
+void Simulator::setFP32Result(float result) {
+ set_sreg(0, result);
+}
+
+
+void Simulator::setFP64Result(double result) {
+ set_dreg(0, result);
+}
+
+
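+// Function-pointer prototypes used by VisitCallRedirection() below to cast a
+// redirected native function to the signature implied by its ABIFunctionType
+// before invoking it with the simulated register values.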
+typedef int64_t (*Prototype_General0)();
+typedef int64_t (*Prototype_General1)(int64_t arg0);
+typedef int64_t (*Prototype_General2)(int64_t arg0, int64_t arg1);
+typedef int64_t (*Prototype_General3)(int64_t arg0, int64_t arg1, int64_t arg2);
+typedef int64_t (*Prototype_General4)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3);
+typedef int64_t (*Prototype_General5)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4);
+typedef int64_t (*Prototype_General6)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5);
+typedef int64_t (*Prototype_General7)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6);
+typedef int64_t (*Prototype_General8)(int64_t arg0, int64_t arg1, int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5, int64_t arg6, int64_t arg7);
+typedef int64_t (*Prototype_GeneralGeneralGeneralInt64)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+typedef int64_t (*Prototype_GeneralGeneralInt64Int64)(int64_t arg0, int64_t arg1, int64_t arg2,
+ int64_t arg3);
+
+typedef int64_t (*Prototype_Int_Double)(double arg0);
+typedef int64_t (*Prototype_Int_IntDouble)(int64_t arg0, double arg1);
+typedef int64_t (*Prototype_Int_DoubleInt)(double arg0, int64_t arg1);
+typedef int64_t (*Prototype_Int_DoubleIntInt)(double arg0, uint64_t arg1, uint64_t arg2);
+typedef int64_t (*Prototype_Int_IntDoubleIntInt)(uint64_t arg0, double arg1,
+ uint64_t arg2, uint64_t arg3);
+
+typedef float (*Prototype_Float32_Float32)(float arg0);
+typedef int64_t (*Prototype_Int_Float32)(float arg0);
+typedef float (*Prototype_Float32_Float32Float32)(float arg0, float arg1);
+
+typedef double (*Prototype_Double_None)();
+typedef double (*Prototype_Double_Double)(double arg0);
+typedef double (*Prototype_Double_Int)(int64_t arg0);
+typedef double (*Prototype_Double_DoubleInt)(double arg0, int64_t arg1);
+typedef double (*Prototype_Double_IntDouble)(int64_t arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDouble)(double arg0, double arg1);
+typedef double (*Prototype_Double_DoubleDoubleDouble)(double arg0, double arg1, double arg2);
+typedef double (*Prototype_Double_DoubleDoubleDoubleDouble)(double arg0, double arg1,
+ double arg2, double arg3);
+
+typedef int32_t (*Prototype_Int32_General)(int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32)(int64_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32)(int64_t, int32_t, int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ float,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General)(int64_t,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ float,
+ float,
+ int32_t,
+ float,
+ int32_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32Int64)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int32General)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32Int64Int64)(int64_t,
+ int32_t,
+ int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32)(int64_t,
+ int32_t,
+ int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt32GeneralInt32Int32)(int64_t,
+ int32_t,
+ int64_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneral)(int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralGeneral)(int64_t,
+ int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32Int32)(int64_t,
+ int64_t,
+ int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int32Int32)(int64_t, int64_t,
+ int32_t, int32_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32)(int64_t, int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64)(int64_t, int64_t,
+ int32_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int32Int64General)(int64_t, int64_t,
+ int32_t, int64_t,
+ int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64)(int64_t, int64_t,
+ int64_t, int64_t);
+typedef int32_t (*Prototype_Int32_GeneralInt64Int64Int64General)(int64_t, int64_t,
+ int64_t, int64_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32)(int64_t, int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32)(int64_t,
+ int32_t,
+ int32_t);
+typedef int64_t (*Prototype_General_GeneralInt32General)(int64_t,
+ int32_t,
+ int64_t);
+typedef int64_t (*Prototype_General_GeneralInt32Int32GeneralInt32)(int64_t,
+ int32_t,
+ int32_t,
+ int64_t,
+ int32_t);
+typedef int32_t (*Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32)(
+ int64_t,
+ int64_t,
+ int32_t,
+ int64_t,
+ int32_t,
+ int32_t,
+ int32_t);
+typedef int64_t (*Prototype_Int64_General)(int64_t);
+typedef int64_t (*Prototype_Int64_GeneralInt64)(int64_t, int64_t);
+
+// Simulator support for callWithABI().
+void
+Simulator::VisitCallRedirection(const Instruction* instr)
+{
+ VIXL_ASSERT(instr->Mask(ExceptionMask) == SVC);
+ VIXL_ASSERT(instr->ImmException() == kCallRtRedirected);
+
+ const Redirection* redir = Redirection::FromSvcInstruction(instr);
+ uintptr_t nativeFn = reinterpret_cast<uintptr_t>(redir->nativeFunction());
+
+ // Stack must be aligned prior to the call.
+ // FIXME: It's actually our job to perform the alignment...
+ //VIXL_ASSERT((xreg(31, Reg31IsStackPointer) & (StackAlignment - 1)) == 0);
+
+ // Used to assert that callee-saved registers are preserved.
+ DebugOnly<int64_t> x19 = xreg(19);
+ DebugOnly<int64_t> x20 = xreg(20);
+ DebugOnly<int64_t> x21 = xreg(21);
+ DebugOnly<int64_t> x22 = xreg(22);
+ DebugOnly<int64_t> x23 = xreg(23);
+ DebugOnly<int64_t> x24 = xreg(24);
+ DebugOnly<int64_t> x25 = xreg(25);
+ DebugOnly<int64_t> x26 = xreg(26);
+ DebugOnly<int64_t> x27 = xreg(27);
+ DebugOnly<int64_t> x28 = xreg(28);
+ DebugOnly<int64_t> x29 = xreg(29);
+ DebugOnly<int64_t> savedSP = get_sp();
+
+ // Remember LR for returning from the "call".
+ int64_t savedLR = xreg(30);
+
+ // Allow recursive Simulator calls: returning from the call must stop
+ // the simulation and transition back to native Simulator code.
+ set_xreg(30, int64_t(kEndOfSimAddress));
+
+ // Store argument register values in local variables for ease of use below.
+ int64_t x0 = xreg(0);
+ int64_t x1 = xreg(1);
+ int64_t x2 = xreg(2);
+ int64_t x3 = xreg(3);
+ int64_t x4 = xreg(4);
+ int64_t x5 = xreg(5);
+ int64_t x6 = xreg(6);
+ int64_t x7 = xreg(7);
+ int64_t x8 = xreg(8);
+ double d0 = dreg(0);
+ double d1 = dreg(1);
+ double d2 = dreg(2);
+ double d3 = dreg(3);
+ float s0 = sreg(0);
+ float s1 = sreg(1);
+ float s2 = sreg(2);
+ float s3 = sreg(3);
+ float s4 = sreg(4);
+
+ // Dispatch the call and set the return value.
+ switch (redir->type()) {
+ // Cases with int64_t return type.
+ case js::jit::Args_General0: {
+ int64_t ret = reinterpret_cast<Prototype_General0>(nativeFn)();
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General1: {
+ int64_t ret = reinterpret_cast<Prototype_General1>(nativeFn)(x0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General2: {
+ int64_t ret = reinterpret_cast<Prototype_General2>(nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General3: {
+ int64_t ret = reinterpret_cast<Prototype_General3>(nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General4: {
+ int64_t ret = reinterpret_cast<Prototype_General4>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General5: {
+ int64_t ret = reinterpret_cast<Prototype_General5>(nativeFn)(x0, x1, x2, x3, x4);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General6: {
+ int64_t ret = reinterpret_cast<Prototype_General6>(nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General7: {
+ int64_t ret = reinterpret_cast<Prototype_General7>(nativeFn)(x0, x1, x2, x3, x4, x5, x6);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General8: {
+ int64_t ret = reinterpret_cast<Prototype_General8>(nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_GeneralGeneralGeneralInt64: {
+ int64_t ret = reinterpret_cast<Prototype_GeneralGeneralGeneralInt64>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_GeneralGeneralInt64Int64: {
+ int64_t ret = reinterpret_cast<Prototype_GeneralGeneralInt64Int64>(nativeFn)(x0, x1, x2, x3);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with GPR return type. This can be int32 or int64, but int64 is a safer assumption.
+ case js::jit::Args_Int_Double: {
+ int64_t ret = reinterpret_cast<Prototype_Int_Double>(nativeFn)(d0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_IntDouble: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDouble>(nativeFn)(x0, d0);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_DoubleInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_DoubleInt>(nativeFn)(d0, x0);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_IntDoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_IntDoubleIntInt>(nativeFn)(x0, d0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int_DoubleIntInt: {
+ int64_t ret = reinterpret_cast<Prototype_Int_DoubleIntInt>(nativeFn)(d0, x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+
+ // Cases with float return type.
+ case js::jit::Args_Float32_Float32: {
+ float ret = reinterpret_cast<Prototype_Float32_Float32>(nativeFn)(s0);
+ setFP32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int_Float32: {
+ int64_t ret = reinterpret_cast<Prototype_Int_Float32>(nativeFn)(s0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Float32_Float32Float32: {
+ float ret = reinterpret_cast<Prototype_Float32_Float32Float32>(nativeFn)(s0, s1);
+ setFP32Result(ret);
+ break;
+ }
+
+ // Cases with double return type.
+ case js::jit::Args_Double_None: {
+ double ret = reinterpret_cast<Prototype_Double_None>(nativeFn)();
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Double: {
+ double ret = reinterpret_cast<Prototype_Double_Double>(nativeFn)(d0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_Int: {
+ double ret = reinterpret_cast<Prototype_Double_Int>(nativeFn)(x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleInt: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleInt>(nativeFn)(d0, x0);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDouble>(nativeFn)(d0, d1);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDouble>(nativeFn)(d0, d1, d2);
+ setFP64Result(ret);
+ break;
+ }
+ case js::jit::Args_Double_DoubleDoubleDoubleDouble: {
+ double ret = reinterpret_cast<Prototype_Double_DoubleDoubleDoubleDouble>(nativeFn)(d0, d1, d2, d3);
+ setFP64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Double_IntDouble: {
+ double ret = reinterpret_cast<Prototype_Double_IntDouble>(nativeFn)(x0, d0);
+ setFP64Result(ret);
+ break;
+ }
+
+ case js::jit::Args_Int32_General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_General>(nativeFn)(x0);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32>(nativeFn)(x0, x1);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4, x5, x6, x7);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, x2, x3, x4, x5);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, s2, s3, x2, x3, x4, x5, x6);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General>(
+ nativeFn)(x0, x1, s0, s1, x2, s2, s3, x3, s4, x4, x5, x6, x7, x8);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int32General: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int32General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int32General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int32General>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32Int64Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32GeneralInt32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt32GeneralInt32Int32: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralInt32GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneral: {
+ int32_t ret =
+ reinterpret_cast<Prototype_Int32_GeneralGeneral>(nativeFn)(x0, x1);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralGeneral: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralGeneral>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralGeneralInt32Int32>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int32Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int32Int32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int32Int64General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int32Int64General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64>(
+ nativeFn)(x0, x1, x2, x3);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralInt64Int64Int64General: {
+ int32_t ret = reinterpret_cast<Prototype_Int32_GeneralInt64Int64Int64General>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32>(nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32: {
+ int64_t ret = reinterpret_cast<Prototype_General_GeneralInt32Int32>(
+ nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32General: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32General>(
+ nativeFn)(x0, x1, x2);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_General_GeneralInt32Int32GeneralInt32: {
+ int64_t ret =
+ reinterpret_cast<Prototype_General_GeneralInt32Int32GeneralInt32>(
+ nativeFn)(x0, x1, x2, x3, x4);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int32_GeneralGeneralInt32GeneralInt32Int32Int32: {
+ int32_t ret = reinterpret_cast<
+ Prototype_Int32_GeneralGeneralInt32GeneralInt32Int32Int32>(nativeFn)(
+ x0, x1, x2, x3, x4, x5, x6);
+ setGPR32Result(ret);
+ break;
+ }
+ case js::jit::Args_Int64_General: {
+ int64_t ret =
+ reinterpret_cast<Prototype_Int64_General>(
+ nativeFn)(x0);
+ setGPR64Result(ret);
+ break;
+ }
+ case js::jit::Args_Int64_GeneralInt64: {
+ int64_t ret =
+ reinterpret_cast<Prototype_Int64_GeneralInt64>(
+ nativeFn)(x0, x1);
+ setGPR64Result(ret);
+ break;
+ }
+
+ default:
+ MOZ_CRASH("Unknown function type.");
+ }
+
+  // Nuke the volatile registers. x0-x7 are used as result registers, but
+  // apart from x0 none of them carries a result for the signatures above.
+ for (int i = 1; i <= 18; i++) {
+    // The poison value spells out "c0de feed 1bad da7a".
+ set_xreg(i, int64_t(0xc0defeed1badda7a));
+ }
+
+ // Assert that callee-saved registers are unchanged.
+ VIXL_ASSERT(xreg(19) == x19);
+ VIXL_ASSERT(xreg(20) == x20);
+ VIXL_ASSERT(xreg(21) == x21);
+ VIXL_ASSERT(xreg(22) == x22);
+ VIXL_ASSERT(xreg(23) == x23);
+ VIXL_ASSERT(xreg(24) == x24);
+ VIXL_ASSERT(xreg(25) == x25);
+ VIXL_ASSERT(xreg(26) == x26);
+ VIXL_ASSERT(xreg(27) == x27);
+ VIXL_ASSERT(xreg(28) == x28);
+ VIXL_ASSERT(xreg(29) == x29);
+
+ // Assert that the stack is unchanged.
+ VIXL_ASSERT(savedSP == get_sp());
+
+ // Simulate a return.
+ set_lr(savedLR);
+ set_pc((Instruction*)savedLR);
+ if (getenv("USE_DEBUGGER"))
+ printf("SVCRET\n");
+}
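+
+// Illustrative sketch (not part of the VIXL/SpiderMonkey sources): supporting
+// an additional callWithABI signature follows the pattern above -- declare a
+// matching Prototype_* typedef, then dispatch on the js::jit::Args_* tag. A
+// hypothetical Args_Int32_Int32 case would look roughly like this:
+//
+//   typedef int32_t (*Prototype_Int32_Int32)(int32_t);
+//   ...
+//   case js::jit::Args_Int32_Int32: {
+//     int32_t ret = reinterpret_cast<Prototype_Int32_Int32>(nativeFn)(x0);
+//     setGPR32Result(ret);
+//     break;
+//   }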
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+void
+Simulator::FlushICache()
+{
+ // Flush the caches recorded by the current thread as well as what got
+ // recorded from other threads before this call.
+ auto& vec = SimulatorProcess::getICacheFlushes(this);
+ for (auto& flush : vec) {
+ decoder_->FlushICache(flush.start, flush.length);
+ }
+ vec.clear();
+ pendingCacheRequests = false;
+}
+
+void CachingDecoder::Decode(const Instruction* instr) {
+ InstDecodedKind state;
+ if (lastPage_ && lastPage_->contains(instr)) {
+ state = lastPage_->decode(instr);
+ } else {
+ uintptr_t key = SinglePageDecodeCache::PageStart(instr);
+ ICacheMap::AddPtr p = iCache_.lookupForAdd(key);
+ if (p) {
+ lastPage_ = p->value();
+ state = lastPage_->decode(instr);
+ } else {
+ js::AutoEnterOOMUnsafeRegion oomUnsafe;
+ SinglePageDecodeCache* newPage = js_new<SinglePageDecodeCache>(instr);
+ if (!newPage || !iCache_.add(p, key, newPage)) {
+ oomUnsafe.crash("Simulator SinglePageDecodeCache");
+ }
+ lastPage_ = newPage;
+ state = InstDecodedKind::NotDecodedYet;
+ }
+ }
+
+ switch (state) {
+ case InstDecodedKind::NotDecodedYet: {
+ cachingDecoder_.setDecodePtr(lastPage_->decodePtr(instr));
+ this->Decoder::Decode(instr);
+ break;
+ }
+#define CASE(A) \
+ case InstDecodedKind::A: { \
+ Visit##A(instr); \
+ break; \
+ }
+
+ VISITOR_LIST(CASE)
+#undef CASE
+ }
+}
+
+void CachingDecoder::FlushICache(void* start, size_t size) {
+ MOZ_ASSERT(uintptr_t(start) % vixl::kInstructionSize == 0);
+ MOZ_ASSERT(size % vixl::kInstructionSize == 0);
+ const uint8_t* it = reinterpret_cast<const uint8_t*>(start);
+ const uint8_t* end = it + size;
+ SinglePageDecodeCache* last = nullptr;
+ for (; it < end; it += vixl::kInstructionSize) {
+ auto instr = reinterpret_cast<const Instruction*>(it);
+ if (last && last->contains(instr)) {
+ last->clearDecode(instr);
+ } else {
+ uintptr_t key = SinglePageDecodeCache::PageStart(instr);
+ ICacheMap::Ptr p = iCache_.lookup(key);
+ if (p) {
+ last = p->value();
+ last->clearDecode(instr);
+ }
+ }
+ }
+}
+#endif
+
+} // namespace vixl
+
+namespace js {
+namespace jit {
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+void SimulatorProcess::recordICacheFlush(void* start, size_t length) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ ICacheFlush range{start, length};
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (!s.records.append(range)) {
+ oomUnsafe.crash("Simulator recordFlushICache");
+ }
+ }
+}
+
+void SimulatorProcess::membarrier() {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ s.thread->pendingCacheRequests = true;
+ }
+}
+
+SimulatorProcess::ICacheFlushes& SimulatorProcess::getICacheFlushes(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (s.thread == sim) {
+ return s.records;
+ }
+ }
+ MOZ_CRASH("Simulator is not registered in the SimulatorProcess");
+}
+
+bool SimulatorProcess::registerSimulator(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ ICacheFlushes empty;
+ SimFlushes simFlushes{sim, std::move(empty)};
+ return singleton_->pendingFlushes_.append(std::move(simFlushes));
+}
+
+void SimulatorProcess::unregisterSimulator(Simulator* sim) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ for (auto& s : singleton_->pendingFlushes_) {
+ if (s.thread == sim) {
+ singleton_->pendingFlushes_.erase(&s);
+ return;
+ }
+ }
+ MOZ_CRASH("Simulator is not registered in the SimulatorProcess");
+}
+#endif // JS_CACHE_SIMULATOR_ARM64
+
+} // namespace jit
+} // namespace js
+
+vixl::Simulator* JSContext::simulator() const {
+ return simulator_;
+}
diff --git a/js/src/jit/arm64/vixl/Platform-vixl.h b/js/src/jit/arm64/vixl/Platform-vixl.h
new file mode 100644
index 0000000000..a4de54c785
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Platform-vixl.h
@@ -0,0 +1,39 @@
+// Copyright 2014, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_PLATFORM_H
+#define VIXL_PLATFORM_H
+
+// Define platform specific functionalities.
+#include <signal.h>
+
+#include "jstypes.h"
+
+namespace vixl {
+inline void HostBreakpoint(int64_t code = 0) { raise(SIGINT); }
+} // namespace vixl
+
+#endif
diff --git a/js/src/jit/arm64/vixl/README.md b/js/src/jit/arm64/vixl/README.md
new file mode 100644
index 0000000000..7111753279
--- /dev/null
+++ b/js/src/jit/arm64/vixl/README.md
@@ -0,0 +1,7 @@
+This directory is a mix of VIXL files for ARM64 and files added to integrate
+VIXL with the SpiderMonkey MacroAssembler. Most of the SpiderMonkey extensions
+live in files prefixed with Moz*, but some are spread across imported files
+when convenient.
+
+VIXL upstream sources can be found at:
+https://git.linaro.org/arm/vixl.git/about/
diff --git a/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
new file mode 100644
index 0000000000..4b9064a89b
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-Constants-vixl.h
@@ -0,0 +1,140 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+#define VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
+
+namespace vixl {
+
+// Debug instructions.
+//
+// VIXL's macro-assembler and simulator support a few pseudo instructions to
+// make debugging easier. These pseudo instructions do not exist on real
+// hardware.
+//
+// TODO: Also consider allowing these pseudo-instructions to be disabled in the
+// simulator, so that users can check that the input is valid native code.
+// (This isn't possible in all cases. Printf won't work, for example.)
+//
+// Each debug pseudo instruction is represented by a HLT instruction. The HLT
+// immediate field is used to identify the type of debug pseudo instruction.
+
+enum DebugHltOpcodes {
+ kPrintfOpcode,
+ kTraceOpcode,
+ kLogOpcode,
+ // Aliases.
+ kDebugHltFirstOpcode = kPrintfOpcode,
+ kDebugHltLastOpcode = kLogOpcode
+};
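+
+// For example (an illustrative check, not code from this header), the
+// simulator recognizes a debug pseudo instruction roughly as:
+//
+//   (instr->Mask(ExceptionMask) == HLT) &&
+//   (instr->ImmException() >= kDebugHltFirstOpcode) &&
+//   (instr->ImmException() <= kDebugHltLastOpcode)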
+
+// Each pseudo instruction uses a custom encoding for additional arguments, as
+// described below.
+
+// Unreachable - kUnreachableOpcode
+//
+// Instruction which should never be executed. This is used as a guard in parts
+// of the code that should not be reachable, such as in data encoded inline in
+// the instructions.
+
+// Printf - kPrintfOpcode
+// - arg_count: The number of arguments.
+// - arg_pattern: A set of PrintfArgPattern values, packed into two-bit fields.
+//
+// Simulate a call to printf.
+//
+// Floating-point and integer arguments are passed in separate sets of registers
+// in AAPCS64 (even for varargs functions), so it is not possible to determine
+// the type of each argument without some information about the values that were
+// passed in. This information could be retrieved from the printf format string,
+// but the format string is not trivial to parse so we encode the relevant
+// information with the HLT instruction.
+//
+// Also, the following registers are populated (as if for a native A64 call):
+// x0: The format string
+// x1-x7: Optional arguments, if type == CPURegister::kRegister
+// d0-d7: Optional arguments, if type == CPURegister::kFPRegister
+const unsigned kPrintfArgCountOffset = 1 * kInstructionSize;
+const unsigned kPrintfArgPatternListOffset = 2 * kInstructionSize;
+const unsigned kPrintfLength = 3 * kInstructionSize;
+
+const unsigned kPrintfMaxArgCount = 4;
+
+// The argument pattern is a set of two-bit-fields, each with one of the
+// following values:
+enum PrintfArgPattern {
+ kPrintfArgW = 1,
+ kPrintfArgX = 2,
+ // There is no kPrintfArgS because floats are always converted to doubles in C
+ // varargs calls.
+ kPrintfArgD = 3
+};
+static const unsigned kPrintfArgPatternBits = 2;
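+
+// Illustrative sketch (not part of the upstream sources): the arg_pattern word
+// is built by packing one two-bit PrintfArgPattern value per argument,
+// starting at bit 0. A hypothetical helper doing so could be written as:
+//
+//   uint32_t PackPrintfArgPattern(const PrintfArgPattern* args,
+//                                 unsigned arg_count) {
+//     VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+//     uint32_t pattern = 0;
+//     for (unsigned i = 0; i < arg_count; i++) {
+//       pattern |= static_cast<uint32_t>(args[i]) << (i * kPrintfArgPatternBits);
+//     }
+//     return pattern;
+//   }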
+
+// Trace - kTraceOpcode
+// - parameter: TraceParameter stored as a uint32_t
+// - command: TraceCommand stored as a uint32_t
+//
+// Allow for trace management in the generated code. This enables or disables
+// automatic tracing of the specified information for every simulated
+// instruction.
+const unsigned kTraceParamsOffset = 1 * kInstructionSize;
+const unsigned kTraceCommandOffset = 2 * kInstructionSize;
+const unsigned kTraceLength = 3 * kInstructionSize;
+
+// Trace parameters.
+enum TraceParameters {
+ LOG_DISASM = 1 << 0, // Log disassembly.
+ LOG_REGS = 1 << 1, // Log general purpose registers.
+ LOG_VREGS = 1 << 2, // Log NEON and floating-point registers.
+ LOG_SYSREGS = 1 << 3, // Log the flags and system registers.
+ LOG_WRITE = 1 << 4, // Log writes to memory.
+
+ LOG_NONE = 0,
+ LOG_STATE = LOG_REGS | LOG_VREGS | LOG_SYSREGS,
+ LOG_ALL = LOG_DISASM | LOG_STATE | LOG_WRITE
+};
+
+// Trace commands.
+enum TraceCommand {
+ TRACE_ENABLE = 1,
+ TRACE_DISABLE = 2
+};
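+
+// Illustrative usage (assuming the MacroAssembler exposes this pseudo
+// instruction as Trace(parameters, command)):
+//
+//   masm.Trace(LOG_ALL, TRACE_ENABLE);   // trace everything from here on
+//   ...
+//   masm.Trace(LOG_ALL, TRACE_DISABLE);  // stop tracing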
+
+// Log - kLogOpcode
+// - parameter: TraceParameter stored as a uint32_t
+//
+// Print the specified information once. This mechanism is separate from Trace.
+// In particular, _all_ of the specified registers are printed, rather than just
+// the registers that the instruction writes.
+//
+// Any combination of the TraceParameters values can be used, except that
+// LOG_DISASM is not supported for Log.
+const unsigned kLogParamsOffset = 1 * kInstructionSize;
+const unsigned kLogLength = 2 * kInstructionSize;
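+
+// Illustrative usage (assuming the MacroAssembler exposes this pseudo
+// instruction as Log(parameters)):
+//
+//   masm.Log(LOG_REGS | LOG_VREGS);  // print all registers once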
+} // namespace vixl
+
+#endif // VIXL_A64_SIMULATOR_CONSTANTS_A64_H_
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.cpp b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
new file mode 100644
index 0000000000..539d610b6a
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.cpp
@@ -0,0 +1,3993 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "jit/arm64/vixl/Simulator-vixl.h"
+
+#include <cmath>
+#include <string.h>
+
+#include "jit/AtomicOperations.h"
+
+namespace vixl {
+
+const Instruction* Simulator::kEndOfSimAddress = NULL;
+
+void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
+ int width = msb - lsb + 1;
+ VIXL_ASSERT(IsUintN(width, bits) || IsIntN(width, bits));
+
+ bits <<= lsb;
+ uint32_t mask = ((1 << width) - 1) << lsb;
+ VIXL_ASSERT((mask & write_ignore_mask_) == 0);
+
+ value_ = (value_ & ~mask) | (bits & mask);
+}
+
+
+SimSystemRegister SimSystemRegister::DefaultValueFor(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ return SimSystemRegister(0x00000000, NZCVWriteIgnoreMask);
+ case FPCR:
+ return SimSystemRegister(0x00000000, FPCRWriteIgnoreMask);
+ default:
+ VIXL_UNREACHABLE();
+ return SimSystemRegister();
+ }
+}
+
+
+void Simulator::Run() {
+ pc_modified_ = false;
+ while (pc_ != kEndOfSimAddress) {
+ ExecuteInstruction();
+ LogAllWrittenRegisters();
+ }
+}
+
+
+void Simulator::RunFrom(const Instruction* first) {
+ set_pc(first);
+ Run();
+}
+
+
+const char* Simulator::xreg_names[] = {
+"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
+"x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
+"x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",
+"x24", "x25", "x26", "x27", "x28", "x29", "lr", "xzr", "sp"};
+
+const char* Simulator::wreg_names[] = {
+"w0", "w1", "w2", "w3", "w4", "w5", "w6", "w7",
+"w8", "w9", "w10", "w11", "w12", "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w24", "w25", "w26", "w27", "w28", "w29", "w30", "wzr", "wsp"};
+
+const char* Simulator::sreg_names[] = {
+"s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+"s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+"s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+"s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31"};
+
+const char* Simulator::dreg_names[] = {
+"d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+"d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+"d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+"d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+const char* Simulator::vreg_names[] = {
+"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+"v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+"v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+"v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+
+
+const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return wreg_names[code];
+}
+
+
+const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ // If the code represents the stack pointer, index the name after zr.
+ if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
+ code = kZeroRegCode + 1;
+ }
+ return xreg_names[code];
+}
+
+
+const char* Simulator::SRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return sreg_names[code];
+}
+
+
+const char* Simulator::DRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfFPRegisters);
+ return dreg_names[code];
+}
+
+
+const char* Simulator::VRegNameForCode(unsigned code) {
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ return vreg_names[code];
+}
+
+
+#define COLOUR(colour_code) "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code) "\033[1;" colour_code "m"
+#define NORMAL ""
+#define GREY "30"
+#define RED "31"
+#define GREEN "32"
+#define YELLOW "33"
+#define BLUE "34"
+#define MAGENTA "35"
+#define CYAN "36"
+#define WHITE "37"
+void Simulator::set_coloured_trace(bool value) {
+ coloured_trace_ = value;
+
+ clr_normal = value ? COLOUR(NORMAL) : "";
+ clr_flag_name = value ? COLOUR_BOLD(WHITE) : "";
+ clr_flag_value = value ? COLOUR(NORMAL) : "";
+ clr_reg_name = value ? COLOUR_BOLD(CYAN) : "";
+ clr_reg_value = value ? COLOUR(CYAN) : "";
+ clr_vreg_name = value ? COLOUR_BOLD(MAGENTA) : "";
+ clr_vreg_value = value ? COLOUR(MAGENTA) : "";
+ clr_memory_address = value ? COLOUR_BOLD(BLUE) : "";
+ clr_warning = value ? COLOUR_BOLD(YELLOW) : "";
+ clr_warning_message = value ? COLOUR(YELLOW) : "";
+ clr_printf = value ? COLOUR(GREEN) : "";
+}
+#undef COLOUR
+#undef COLOUR_BOLD
+#undef NORMAL
+#undef GREY
+#undef RED
+#undef GREEN
+#undef YELLOW
+#undef BLUE
+#undef MAGENTA
+#undef CYAN
+#undef WHITE
+
+
+void Simulator::set_trace_parameters(int parameters) {
+ bool disasm_before = trace_parameters_ & LOG_DISASM;
+ trace_parameters_ = parameters;
+ bool disasm_after = trace_parameters_ & LOG_DISASM;
+
+ if (disasm_before != disasm_after) {
+ if (disasm_after) {
+ decoder_->InsertVisitorBefore(print_disasm_, this);
+ } else {
+ decoder_->RemoveVisitor(print_disasm_);
+ }
+ }
+}
+
+
+void Simulator::set_instruction_stats(bool value) {
+ if (instrumentation_ == nullptr) {
+ return;
+ }
+
+ if (value != instruction_stats_) {
+ if (value) {
+ decoder_->AppendVisitor(instrumentation_);
+ } else {
+ decoder_->RemoveVisitor(instrumentation_);
+ }
+ instruction_stats_ = value;
+ }
+}
+
+// Helpers ---------------------------------------------------------------------
+uint64_t Simulator::AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in) {
+ VIXL_ASSERT((carry_in == 0) || (carry_in == 1));
+ VIXL_ASSERT((reg_size == kXRegSize) || (reg_size == kWRegSize));
+
+ uint64_t max_uint = (reg_size == kWRegSize) ? kWMaxUInt : kXMaxUInt;
+ uint64_t reg_mask = (reg_size == kWRegSize) ? kWRegMask : kXRegMask;
+ uint64_t sign_mask = (reg_size == kWRegSize) ? kWSignMask : kXSignMask;
+
+ left &= reg_mask;
+ right &= reg_mask;
+ uint64_t result = (left + right + carry_in) & reg_mask;
+
+ if (set_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+
+ // Compute the C flag by comparing the result to the max unsigned integer.
+ uint64_t max_uint_2op = max_uint - carry_in;
+ bool C = (left > max_uint_2op) || ((max_uint_2op - left) < right);
+ nzcv().SetC(C ? 1 : 0);
+
+ // Overflow iff the sign bit is the same for the two inputs and different
+ // for the result.
+ uint64_t left_sign = left & sign_mask;
+ uint64_t right_sign = right & sign_mask;
+ uint64_t result_sign = result & sign_mask;
+ bool V = (left_sign == right_sign) && (left_sign != result_sign);
+ nzcv().SetV(V ? 1 : 0);
+
+ LogSystemRegister(NZCV);
+ }
+ return result;
+}
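+
+// Worked example (illustrative): a 32-bit ADDS of 0xffffffff and 0x00000001
+// with carry_in == 0 yields result 0x00000000, so Z=1 and C=1 (the unsigned
+// sum wraps past kWMaxUInt), while N=0 and V=0 (operands of different sign
+// cannot overflow as signed values).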
+
+
+int64_t Simulator::ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount) {
+ if (amount == 0) {
+ return value;
+ }
+ int64_t mask = reg_size == kXRegSize ? kXRegMask : kWRegMask;
+ switch (shift_type) {
+ case LSL:
+ return (value << amount) & mask;
+ case LSR:
+ return static_cast<uint64_t>(value) >> amount;
+ case ASR: {
+ // Shift used to restore the sign.
+ unsigned s_shift = kXRegSize - reg_size;
+ // Value with its sign restored.
+ int64_t s_value = (value << s_shift) >> s_shift;
+ return (s_value >> amount) & mask;
+ }
+ case ROR: {
+ if (reg_size == kWRegSize) {
+ value &= kWRegMask;
+ }
+ return (static_cast<uint64_t>(value) >> amount) |
+ ((value & ((INT64_C(1) << amount) - 1)) <<
+ (reg_size - amount));
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ return 0;
+ }
+}
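+
+// Worked example (illustrative): for a W register, ShiftOperand(kWRegSize,
+// 0x00000001, ROR, 1) first masks the value to 32 bits and then rotates it,
+// producing 0x80000000; the bit shifted out at the bottom reappears at bit 31.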
+
+
+int64_t Simulator::ExtendValue(unsigned reg_size,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift) {
+ switch (extend_type) {
+ case UXTB:
+ value &= kByteMask;
+ break;
+ case UXTH:
+ value &= kHalfWordMask;
+ break;
+ case UXTW:
+ value &= kWordMask;
+ break;
+ case SXTB:
+ value = (value << 56) >> 56;
+ break;
+ case SXTH:
+ value = (value << 48) >> 48;
+ break;
+ case SXTW:
+ value = (value << 32) >> 32;
+ break;
+ case UXTX:
+ case SXTX:
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ int64_t mask = (reg_size == kXRegSize) ? kXRegMask : kWRegMask;
+ return (value << left_shift) & mask;
+}
+
+
+void Simulator::FPCompare(double val0, double val1, FPTrapFlags trap) {
+ AssertSupportedFPCR();
+
+ // TODO: This assumes that the C++ implementation handles comparisons in the
+ // way that we expect (as per AssertSupportedFPCR()).
+ bool process_exception = false;
+ if ((std::isnan(val0) != 0) || (std::isnan(val1) != 0)) {
+ nzcv().SetRawValue(FPUnorderedFlag);
+ if (IsSignallingNaN(val0) || IsSignallingNaN(val1) ||
+ (trap == EnableTrap)) {
+ process_exception = true;
+ }
+ } else if (val0 < val1) {
+ nzcv().SetRawValue(FPLessThanFlag);
+ } else if (val0 > val1) {
+ nzcv().SetRawValue(FPGreaterThanFlag);
+ } else if (val0 == val1) {
+ nzcv().SetRawValue(FPEqualFlag);
+ } else {
+ VIXL_UNREACHABLE();
+ }
+ LogSystemRegister(NZCV);
+ if (process_exception) FPProcessException();
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormatForSize(
+ unsigned reg_size, unsigned lane_size) {
+ VIXL_ASSERT(reg_size >= lane_size);
+
+ uint32_t format = 0;
+ if (reg_size != lane_size) {
+ switch (reg_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format = kPrintRegAsQVector; break;
+ case kDRegSizeInBytes: format = kPrintRegAsDVector; break;
+ }
+ }
+
+ switch (lane_size) {
+ default: VIXL_UNREACHABLE(); break;
+ case kQRegSizeInBytes: format |= kPrintReg1Q; break;
+ case kDRegSizeInBytes: format |= kPrintReg1D; break;
+ case kSRegSizeInBytes: format |= kPrintReg1S; break;
+ case kHRegSizeInBytes: format |= kPrintReg1H; break;
+ case kBRegSizeInBytes: format |= kPrintReg1B; break;
+ }
+ // These sizes would be duplicate case labels.
+ VIXL_STATIC_ASSERT(kXRegSizeInBytes == kDRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kWRegSizeInBytes == kSRegSizeInBytes);
+ VIXL_STATIC_ASSERT(kPrintXReg == kPrintReg1D);
+ VIXL_STATIC_ASSERT(kPrintWReg == kPrintReg1S);
+
+ return static_cast<PrintRegisterFormat>(format);
+}
+
+
+Simulator::PrintRegisterFormat Simulator::GetPrintRegisterFormat(
+ VectorFormat vform) {
+ switch (vform) {
+ default: VIXL_UNREACHABLE(); return kPrintReg16B;
+ case kFormat16B: return kPrintReg16B;
+ case kFormat8B: return kPrintReg8B;
+ case kFormat8H: return kPrintReg8H;
+ case kFormat4H: return kPrintReg4H;
+ case kFormat4S: return kPrintReg4S;
+ case kFormat2S: return kPrintReg2S;
+ case kFormat2D: return kPrintReg2D;
+ case kFormat1D: return kPrintReg1D;
+ }
+}
+
+
+void Simulator::PrintWrittenRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ if (registers_[i].WrittenSinceLastLog()) PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintWrittenVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ if (vregisters_[i].WrittenSinceLastLog()) PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+void Simulator::PrintSystemRegisters() {
+ PrintSystemRegister(NZCV);
+ PrintSystemRegister(FPCR);
+}
+
+
+void Simulator::PrintRegisters() {
+ for (unsigned i = 0; i < kNumberOfRegisters; i++) {
+ PrintRegister(i);
+ }
+}
+
+
+void Simulator::PrintVRegisters() {
+ for (unsigned i = 0; i < kNumberOfVRegisters; i++) {
+ // At this point there is no type information, so print as a raw 1Q.
+ PrintVRegister(i, kPrintReg1Q);
+ }
+}
+
+
+// Print a register's name and raw value.
+//
+// Only the least-significant `size_in_bytes` bytes of the register are printed,
+// but the value is aligned as if the whole register had been printed.
+//
+// For typical register updates, size_in_bytes should be set to kXRegSizeInBytes
+// -- the default -- so that the whole register is printed. Other values of
+// size_in_bytes are intended for use when the register hasn't actually been
+// updated (such as in PrintWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes) {
+ // The template for all supported sizes.
+ // "# x{code}: 0xffeeddccbbaa9988"
+ // "# w{code}: 0xbbaa9988"
+ // "# w{code}<15:0>: 0x9988"
+ // "# w{code}<7:0>: 0x88"
+ unsigned padding_chars = (kXRegSizeInBytes - size_in_bytes) * 2;
+
+ const char * name = "";
+ const char * suffix = "";
+ switch (size_in_bytes) {
+ case kXRegSizeInBytes: name = XRegNameForCode(code, r31mode); break;
+ case kWRegSizeInBytes: name = WRegNameForCode(code, r31mode); break;
+ case 2:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<15:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ case 1:
+ name = WRegNameForCode(code, r31mode);
+ suffix = "<7:0>";
+ padding_chars -= strlen(suffix);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+ fprintf(stream_, "# %s%5s%s: ", clr_reg_name, name, suffix);
+
+ // Print leading padding spaces.
+ VIXL_ASSERT(padding_chars < (kXRegSizeInBytes * 2));
+ for (unsigned i = 0; i < padding_chars; i++) {
+ putc(' ', stream_);
+ }
+
+ // Print the specified bits in hexadecimal format.
+ uint64_t bits = reg<uint64_t>(code, r31mode);
+ bits &= kXRegMask >> ((kXRegSizeInBytes - size_in_bytes) * 8);
+ VIXL_STATIC_ASSERT(sizeof(bits) == kXRegSizeInBytes);
+
+ int chars = size_in_bytes * 2;
+ fprintf(stream_, "%s0x%0*" PRIx64 "%s",
+ clr_reg_value, chars, bits, clr_normal);
+}
+
+
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+ registers_[code].NotifyRegisterLogged();
+
+ // Don't print writes into xzr.
+ if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ // The template for all x and w registers:
+ // "# x{code}: 0x{value}"
+ // "# w{code}: 0x{value}"
+
+ PrintRegisterRawHelper(code, r31mode);
+ fprintf(stream_, "\n");
+}
+
+
+// Print a register's name and raw value.
+//
+// The `bytes` and `lsb` arguments can be used to limit the bytes that are
+// printed. These arguments are intended for use in cases where the register
+// hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a floating-point interpretation or a memory access annotation).
+void Simulator::PrintVRegisterRawHelper(unsigned code, int bytes, int lsb) {
+ // The template for vector types:
+ // "# v{code}: 0xffeeddccbbaa99887766554433221100".
+ // An example with bytes=4 and lsb=8:
+ // "# v{code}: 0xbbaa9988 ".
+ fprintf(stream_, "# %s%5s: %s",
+ clr_vreg_name, VRegNameForCode(code), clr_vreg_value);
+
+ int msb = lsb + bytes - 1;
+ int byte = kQRegSizeInBytes - 1;
+
+ // Print leading padding spaces. (Two spaces per byte.)
+ while (byte > msb) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+
+ // Print the specified part of the value, byte by byte.
+ qreg_t rawbits = qreg(code);
+ fprintf(stream_, "0x");
+ while (byte >= lsb) {
+ fprintf(stream_, "%02x", rawbits.val[byte]);
+ byte--;
+ }
+
+ // Print trailing padding spaces.
+ while (byte >= 0) {
+ fprintf(stream_, " ");
+ byte--;
+ }
+ fprintf(stream_, "%s", clr_normal);
+}
+
+
+// Print each of the specified lanes of a register as a float or double value.
+//
+// The `lane_count` and `rightmost_lane` arguments can be used to limit the
+// lanes that are printed. These arguments are intended for use in cases where
+// the register hasn't actually been updated (such as in PrintVWrite).
+//
+// No newline is printed. This allows the caller to print more details (such as
+// a memory access annotation).
+void Simulator::PrintVRegisterFPHelper(unsigned code,
+ unsigned lane_size_in_bytes,
+ int lane_count,
+ int rightmost_lane) {
+ VIXL_ASSERT((lane_size_in_bytes == kSRegSizeInBytes) ||
+ (lane_size_in_bytes == kDRegSizeInBytes));
+
+ unsigned msb = ((lane_count + rightmost_lane) * lane_size_in_bytes);
+ VIXL_ASSERT(msb <= kQRegSizeInBytes);
+
+ // For scalar types ((lane_count == 1) && (rightmost_lane == 0)), a register
+ // name is used:
+ // " (s{code}: {value})"
+ // " (d{code}: {value})"
+ // For vector types, "..." is used to represent one or more omitted lanes.
+ // " (..., {value}, {value}, ...)"
+ if ((lane_count == 1) && (rightmost_lane == 0)) {
+ const char * name =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? SRegNameForCode(code)
+ : DRegNameForCode(code);
+ fprintf(stream_, " (%s%s: ", clr_vreg_name, name);
+ } else {
+ if (msb < (kQRegSizeInBytes - 1)) {
+ fprintf(stream_, " (..., ");
+ } else {
+ fprintf(stream_, " (");
+ }
+ }
+
+ // Print the list of values.
+ const char * separator = "";
+ int leftmost_lane = rightmost_lane + lane_count - 1;
+ for (int lane = leftmost_lane; lane >= rightmost_lane; lane--) {
+ double value =
+ (lane_size_in_bytes == kSRegSizeInBytes) ? vreg(code).Get<float>(lane)
+ : vreg(code).Get<double>(lane);
+ fprintf(stream_, "%s%s%#g%s", separator, clr_vreg_value, value, clr_normal);
+ separator = ", ";
+ }
+
+ if (rightmost_lane > 0) {
+ fprintf(stream_, ", ...");
+ }
+ fprintf(stream_, ")");
+}
+
+
+void Simulator::PrintVRegister(unsigned code, PrintRegisterFormat format) {
+ vregisters_[code].NotifyRegisterLogged();
+
+ int lane_size_log2 = format & kPrintRegLaneSizeMask;
+
+ int reg_size_log2;
+ if (format & kPrintRegAsQVector) {
+ reg_size_log2 = kQRegSizeInBytesLog2;
+ } else if (format & kPrintRegAsDVector) {
+ reg_size_log2 = kDRegSizeInBytesLog2;
+ } else {
+ // Scalar types.
+ reg_size_log2 = lane_size_log2;
+ }
+
+ int lane_count = 1 << (reg_size_log2 - lane_size_log2);
+ int lane_size = 1 << lane_size_log2;
+
+ // The template for vector types:
+ // "# v{code}: 0x{rawbits} (..., {value}, ...)".
+ // The template for scalar types:
+ // "# v{code}: 0x{rawbits} ({reg}:{value})".
+ // The values in parentheses after the bit representations are floating-point
+ // interpretations. They are displayed only if the kPrintVRegAsFP bit is set.
+
+ PrintVRegisterRawHelper(code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(code, lane_size, lane_count);
+ }
+
+ fprintf(stream_, "\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+ switch (id) {
+ case NZCV:
+ fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+ clr_flag_name, clr_flag_value,
+ nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+ clr_normal);
+ break;
+ case FPCR: {
+ static const char * rmode[] = {
+ "0b00 (Round to Nearest)",
+ "0b01 (Round towards Plus Infinity)",
+ "0b10 (Round towards Minus Infinity)",
+ "0b11 (Round towards Zero)"
+ };
+ VIXL_ASSERT(fpcr().RMode() < (sizeof(rmode) / sizeof(rmode[0])));
+ fprintf(stream_,
+ "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+ clr_flag_name, clr_flag_value,
+ fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+ clr_normal);
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::PrintRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ registers_[reg_code].NotifyRegisterLogged();
+
+ USE(format);
+
+ // The template is "# {reg}: 0x{value} <- {address}".
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister);
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVRead(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ vregisters_[reg_code].NotifyRegisterLogged();
+
+ // The template is "# v{code}: 0x{rawbits} <- address".
+ PrintVRegisterRawHelper(reg_code);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, GetPrintRegLaneSizeInBytes(format),
+ GetPrintRegLaneCount(format), lane);
+ }
+ fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format) {
+ VIXL_ASSERT(GetPrintRegLaneCount(format) == 1);
+
+ // The template is "# v{code}: 0x{value} -> {address}". To keep the trace tidy
+ // and readable, the value is aligned with the values in the register trace.
+ PrintRegisterRawHelper(reg_code, Reg31IsZeroRegister,
+ GetPrintRegSizeInBytes(format));
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintVWrite(uintptr_t address,
+ unsigned reg_code,
+ PrintRegisterFormat format,
+ unsigned lane) {
+ // The templates:
+ // "# v{code}: 0x{rawbits} -> {address}"
+ // "# v{code}: 0x{rawbits} (..., {value}, ...) -> {address}".
+ // "# v{code}: 0x{rawbits} ({reg}:{value}) -> {address}"
+ // Because this trace doesn't represent a change to the source register's
+ // value, only the relevant part of the value is printed. To keep the trace
+ // tidy and readable, the raw value is aligned with the other values in the
+ // register trace.
+ int lane_count = GetPrintRegLaneCount(format);
+ int lane_size = GetPrintRegLaneSizeInBytes(format);
+ int reg_size = GetPrintRegSizeInBytes(format);
+ PrintVRegisterRawHelper(reg_code, reg_size, lane_size * lane);
+ if (format & kPrintRegAsFP) {
+ PrintVRegisterFPHelper(reg_code, lane_size, lane_count, lane);
+ }
+ fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+ clr_memory_address, address, clr_normal);
+}
+
+
+// Visitors ---------------------------------------------------------------------
+
+void Simulator::VisitUnimplemented(const Instruction* instr) {
+ printf("Unimplemented instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitUnallocated(const Instruction* instr) {
+ printf("Unallocated instruction at %p: 0x%08" PRIx32 "\n",
+ reinterpret_cast<const void*>(instr), instr->InstructionBits());
+ VIXL_UNIMPLEMENTED();
+}
+
+
+void Simulator::VisitPCRelAddressing(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(PCRelAddressingMask) == ADR) ||
+ (instr->Mask(PCRelAddressingMask) == ADRP));
+
+ set_reg(instr->Rd(), instr->ImmPCOffsetTarget());
+}
+
+
+void Simulator::VisitUnconditionalBranch(const Instruction* instr) {
+ switch (instr->Mask(UnconditionalBranchMask)) {
+ case BL:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case B:
+ set_pc(instr->ImmPCOffsetTarget());
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitConditionalBranch(const Instruction* instr) {
+ VIXL_ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+ if (ConditionPassed(instr->ConditionBranch())) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitUnconditionalBranchToRegister(const Instruction* instr) {
+ const Instruction* target = Instruction::Cast(xreg(instr->Rn()));
+
+ switch (instr->Mask(UnconditionalBranchToRegisterMask)) {
+ case BLR:
+ set_lr(instr->NextInstruction());
+ VIXL_FALLTHROUGH();
+ case BR:
+ case RET: set_pc(target); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitTestBranch(const Instruction* instr) {
+ unsigned bit_pos = (instr->ImmTestBranchBit5() << 5) |
+ instr->ImmTestBranchBit40();
+ bool bit_zero = ((xreg(instr->Rt()) >> bit_pos) & 1) == 0;
+ bool take_branch = false;
+ switch (instr->Mask(TestBranchMask)) {
+ case TBZ: take_branch = bit_zero; break;
+ case TBNZ: take_branch = !bit_zero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::VisitCompareBranch(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ bool take_branch = false;
+ switch (instr->Mask(CompareBranchMask)) {
+ case CBZ_w: take_branch = (wreg(rt) == 0); break;
+ case CBZ_x: take_branch = (xreg(rt) == 0); break;
+ case CBNZ_w: take_branch = (wreg(rt) != 0); break;
+ case CBNZ_x: take_branch = (xreg(rt) != 0); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ if (take_branch) {
+ set_pc(instr->ImmPCOffsetTarget());
+ }
+}
+
+
+void Simulator::AddSubHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ bool set_flags = instr->FlagsUpdate();
+ int64_t new_val = 0;
+ Instr operation = instr->Mask(AddSubOpMask);
+
+ switch (operation) {
+ case ADD:
+ case ADDS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ op2);
+ break;
+ }
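+    // SUB and SUBS reuse AddWithCarry via the two's-complement identity
+    // a - b == a + ~b + 1, so the same flag computation serves both.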
+ case SUB:
+ case SUBS: {
+ new_val = AddWithCarry(reg_size,
+ set_flags,
+ reg(reg_size, instr->Rn(), instr->RnMode()),
+ ~op2,
+ 1);
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ set_reg(reg_size, instr->Rd(), new_val, LogRegWrites, instr->RdMode());
+}
+
+
+void Simulator::VisitAddSubShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ShiftOperand(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Shift>(instr->ShiftDP()),
+ instr->ImmDPShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubImmediate(const Instruction* instr) {
+ int64_t op2 = instr->ImmAddSub() << ((instr->ShiftAddSub() == 1) ? 12 : 0);
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubExtended(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = ExtendValue(reg_size,
+ reg(reg_size, instr->Rm()),
+ static_cast<Extend>(instr->ExtendMode()),
+ instr->ImmExtendShift());
+ AddSubHelper(instr, op2);
+}
+
+
+void Simulator::VisitAddSubWithCarry(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op2 = reg(reg_size, instr->Rm());
+ int64_t new_val;
+
+ if ((instr->Mask(AddSubOpMask) == SUB) || instr->Mask(AddSubOpMask) == SUBS) {
+ op2 = ~op2;
+ }
+
+ new_val = AddWithCarry(reg_size,
+ instr->FlagsUpdate(),
+ reg(reg_size, instr->Rn()),
+ op2,
+ C());
+
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitLogicalShifted(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ Shift shift_type = static_cast<Shift>(instr->ShiftDP());
+ unsigned shift_amount = instr->ImmDPShift();
+ int64_t op2 = ShiftOperand(reg_size, reg(reg_size, instr->Rm()), shift_type,
+ shift_amount);
+ if (instr->Mask(NOT) == NOT) {
+ op2 = ~op2;
+ }
+ LogicalHelper(instr, op2);
+}
+
+
+void Simulator::VisitLogicalImmediate(const Instruction* instr) {
+ LogicalHelper(instr, instr->ImmLogical());
+}
+
+
+void Simulator::LogicalHelper(const Instruction* instr, int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+ int64_t result = 0;
+ bool update_flags = false;
+
+ // Switch on the logical operation, stripping out the NOT bit, as it has a
+ // different meaning for logical immediate instructions.
+ switch (instr->Mask(LogicalOpMask & ~NOT)) {
+ case ANDS: update_flags = true; VIXL_FALLTHROUGH();
+ case AND: result = op1 & op2; break;
+ case ORR: result = op1 | op2; break;
+ case EOR: result = op1 ^ op2; break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ if (update_flags) {
+ nzcv().SetN(CalcNFlag(result, reg_size));
+ nzcv().SetZ(CalcZFlag(result));
+ nzcv().SetC(0);
+ nzcv().SetV(0);
+ LogSystemRegister(NZCV);
+ }
+
+ set_reg(reg_size, instr->Rd(), result, LogRegWrites, instr->RdMode());
+}
+
+
+void Simulator::VisitConditionalCompareRegister(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ ConditionalCompareHelper(instr, reg(reg_size, instr->Rm()));
+}
+
+
+void Simulator::VisitConditionalCompareImmediate(const Instruction* instr) {
+ ConditionalCompareHelper(instr, instr->ImmCondCmp());
+}
+
+
+void Simulator::ConditionalCompareHelper(const Instruction* instr,
+ int64_t op2) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t op1 = reg(reg_size, instr->Rn());
+
+ if (ConditionPassed(instr->Condition())) {
+ // If the condition passes, set the status flags to the result of comparing
+ // the operands.
+ if (instr->Mask(ConditionalCompareMask) == CCMP) {
+ AddWithCarry(reg_size, true, op1, ~op2, 1);
+ } else {
+ VIXL_ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+ AddWithCarry(reg_size, true, op1, op2, 0);
+ }
+ } else {
+ // If the condition fails, set the status flags to the nzcv immediate.
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+}
+
+
+void Simulator::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
+ int offset = instr->ImmLSUnsigned() << instr->SizeLS();
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+
+void Simulator::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), Offset);
+}
+
+
+void Simulator::VisitLoadStorePreIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePostIndex(const Instruction* instr) {
+ LoadStoreHelper(instr, instr->ImmLS(), PostIndex);
+}
+
+
+void Simulator::VisitLoadStoreRegisterOffset(const Instruction* instr) {
+ Extend ext = static_cast<Extend>(instr->ExtendMode());
+ VIXL_ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+ unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
+
+ int64_t offset = ExtendValue(kXRegSize, xreg(instr->Rm()), ext,
+ shift_amount);
+ LoadStoreHelper(instr, offset, Offset);
+}
+
+template<typename T>
+static T Faulted() {
+ return ~0;
+}
+
+template<>
+Simulator::qreg_t Faulted() {
+ static_assert(kQRegSizeInBytes == 16, "Known constraint");
+ static Simulator::qreg_t dummy = { {
+ 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 255, 255, 255
+ } };
+ return dummy;
+}
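+
+// When handle_wasm_seg_fault() below reports that an out-of-bounds wasm access
+// was intercepted, the access still has to produce a value; Faulted<T>()
+// supplies an all-ones poison value of the appropriate width for that case.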
+
+template<typename T> T
+Simulator::Read(uintptr_t address)
+{
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return Faulted<T>();
+ return Memory::Read<T>(address);
+}
+
+template <typename T> void
+Simulator::Write(uintptr_t address, T value)
+{
+ address = Memory::AddressUntag(address);
+ if (handle_wasm_seg_fault(address, sizeof(T)))
+ return;
+ Memory::Write<T>(address, value);
+}
+
+void Simulator::LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode) {
+ unsigned srcdst = instr->Rt();
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+
+ LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreMask));
+ switch (op) {
+ case LDRB_w:
+ set_wreg(srcdst, Read<uint8_t>(address), NoRegLog); break;
+ case LDRH_w:
+ set_wreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
+ case LDR_w:
+ set_wreg(srcdst, Read<uint32_t>(address), NoRegLog); break;
+ case LDR_x:
+ set_xreg(srcdst, Read<uint64_t>(address), NoRegLog); break;
+ case LDRSB_w:
+ set_wreg(srcdst, Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_w:
+ set_wreg(srcdst, Read<int16_t>(address), NoRegLog); break;
+ case LDRSB_x:
+ set_xreg(srcdst, Read<int8_t>(address), NoRegLog); break;
+ case LDRSH_x:
+ set_xreg(srcdst, Read<int16_t>(address), NoRegLog); break;
+ case LDRSW_x:
+ set_xreg(srcdst, Read<int32_t>(address), NoRegLog); break;
+ case LDR_b:
+ set_breg(srcdst, Read<uint8_t>(address), NoRegLog); break;
+ case LDR_h:
+ set_hreg(srcdst, Read<uint16_t>(address), NoRegLog); break;
+ case LDR_s:
+ set_sreg(srcdst, Read<float>(address), NoRegLog); break;
+ case LDR_d:
+ set_dreg(srcdst, Read<double>(address), NoRegLog); break;
+ case LDR_q:
+ set_qreg(srcdst, Read<qreg_t>(address), NoRegLog); break;
+
+ case STRB_w: Write<uint8_t>(address, wreg(srcdst)); break;
+ case STRH_w: Write<uint16_t>(address, wreg(srcdst)); break;
+ case STR_w: Write<uint32_t>(address, wreg(srcdst)); break;
+ case STR_x: Write<uint64_t>(address, xreg(srcdst)); break;
+ case STR_b: Write<uint8_t>(address, breg(srcdst)); break;
+ case STR_h: Write<uint16_t>(address, hreg(srcdst)); break;
+ case STR_s: Write<float>(address, sreg(srcdst)); break;
+ case STR_d: Write<double>(address, dreg(srcdst)); break;
+ case STR_q: Write<qreg_t>(address, qreg(srcdst)); break;
+
+ // Ignore prfm hint instructions.
+ case PRFM: break;
+
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ unsigned access_size = 1 << instr->SizeLS();
+ if (instr->IsLoad()) {
+ if ((op == LDR_s) || (op == LDR_d)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == LDR_b) || (op == LDR_h) || (op == LDR_q)) {
+ LogVRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogRead(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ } else {
+ if ((op == STR_s) || (op == STR_d)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSizeFP(access_size));
+ } else if ((op == STR_b) || (op == STR_h) || (op == STR_q)) {
+ LogVWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ } else {
+ LogWrite(address, srcdst, GetPrintRegisterFormatForSize(access_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::VisitLoadStorePairOffset(const Instruction* instr) {
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::VisitLoadStorePairPreIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PreIndex);
+}
+
+
+void Simulator::VisitLoadStorePairPostIndex(const Instruction* instr) {
+ LoadStorePairHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitLoadStorePairNonTemporal(const Instruction* instr) {
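+  // The non-temporal hint is ignored; these are handled as ordinary
+  // offset-addressed pair accesses.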
+ LoadStorePairHelper(instr, Offset);
+}
+
+
+void Simulator::LoadStorePairHelper(const Instruction* instr,
+ AddrMode addrmode) {
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ int element_size = 1 << instr->SizeLSPair();
+ int64_t offset = instr->ImmLSPair() * element_size;
+ uintptr_t address = AddressModeHelper(instr->Rn(), offset, addrmode);
+ uintptr_t address2 = address + element_size;
+
+ LoadStorePairOp op =
+ static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
+
+ // 'rt' and 'rt2' can only be aliased for stores.
+ VIXL_ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+
+ switch (op) {
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
+ // will print a more detailed log.
+ case LDP_w: {
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Read<uint32_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_s: {
+ set_sreg(rt, Read<float>(address), NoRegLog);
+ set_sreg(rt2, Read<float>(address2), NoRegLog);
+ break;
+ }
+ case LDP_x: {
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Read<uint64_t>(address2), NoRegLog);
+ break;
+ }
+ case LDP_d: {
+ set_dreg(rt, Read<double>(address), NoRegLog);
+ set_dreg(rt2, Read<double>(address2), NoRegLog);
+ break;
+ }
+ case LDP_q: {
+ set_qreg(rt, Read<qreg_t>(address), NoRegLog);
+ set_qreg(rt2, Read<qreg_t>(address2), NoRegLog);
+ break;
+ }
+ case LDPSW_x: {
+ set_xreg(rt, Read<int32_t>(address), NoRegLog);
+ set_xreg(rt2, Read<int32_t>(address2), NoRegLog);
+ break;
+ }
+ case STP_w: {
+ Write<uint32_t>(address, wreg(rt));
+ Write<uint32_t>(address2, wreg(rt2));
+ break;
+ }
+ case STP_s: {
+ Write<float>(address, sreg(rt));
+ Write<float>(address2, sreg(rt2));
+ break;
+ }
+ case STP_x: {
+ Write<uint64_t>(address, xreg(rt));
+ Write<uint64_t>(address2, xreg(rt2));
+ break;
+ }
+ case STP_d: {
+ Write<double>(address, dreg(rt));
+ Write<double>(address2, dreg(rt2));
+ break;
+ }
+ case STP_q: {
+ Write<qreg_t>(address, qreg(rt));
+ Write<qreg_t>(address2, qreg(rt2));
+ break;
+ }
+ default: VIXL_UNREACHABLE();
+ }
+
+ // Print a detailed trace (including the memory address) instead of the basic
+ // register:value trace generated by set_*reg().
+ if (instr->IsLoad()) {
+ if ((op == LDP_s) || (op == LDP_d)) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == LDP_q) {
+ LogVRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogRead(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if ((op == STP_s) || (op == STP_d)) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSizeFP(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSizeFP(element_size));
+ } else if (op == STP_q) {
+ LogVWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogVWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ } else {
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ LogWrite(address2, rt2, GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+void Simulator::PrintExclusiveAccessWarning() {
+ if (print_exclusive_access_warning_) {
+ fprintf(
+ stderr,
+ "%sWARNING:%s VIXL simulator support for load-/store-/clear-exclusive "
+ "instructions is limited. Refer to the README for details.%s\n",
+ clr_warning, clr_warning_message, clr_normal);
+ print_exclusive_access_warning_ = false;
+ }
+}
+
+
+void Simulator::VisitLoadStoreExclusive(const Instruction* instr) {
+ PrintExclusiveAccessWarning();
+
+ unsigned rs = instr->Rs();
+ unsigned rt = instr->Rt();
+ unsigned rt2 = instr->Rt2();
+ unsigned rn = instr->Rn();
+
+ LoadStoreExclusive op =
+ static_cast<LoadStoreExclusive>(instr->Mask(LoadStoreExclusiveMask));
+
+ bool is_acquire_release = instr->LdStXAcquireRelease();
+ bool is_exclusive = !instr->LdStXNotExclusive();
+ bool is_load = instr->LdStXLoad();
+ bool is_pair = instr->LdStXPair();
+
+ unsigned element_size = 1 << instr->LdStXSizeLog2();
+ unsigned access_size = is_pair ? element_size * 2 : element_size;
+ uint64_t address = reg<uint64_t>(rn, Reg31IsStackPointer);
+
+ // Verify that the address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ // Check the alignment of `address`.
+ if (AlignDown(address, access_size) != address) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ // The sp must be aligned to 16 bytes when it is accessed.
+ if ((rn == 31) && (AlignDown(address, 16) != address)) {
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if (is_load) {
+ if (is_exclusive) {
+ local_monitor_.MarkExclusive(address, access_size);
+ } else {
+      // Any non-exclusive load can clear the local monitor as a side effect.
+      // We don't need to do this, but doing so helps stress-test the simulated
+      // code.
+ local_monitor_.Clear();
+ }
+
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_FP_REGS). We
+ // will print a more detailed log.
+ switch (op) {
+ case LDXRB_w:
+ case LDAXRB_w:
+ case LDARB_w:
+ set_wreg(rt, Read<uint8_t>(address), NoRegLog);
+ break;
+ case LDXRH_w:
+ case LDAXRH_w:
+ case LDARH_w:
+ set_wreg(rt, Read<uint16_t>(address), NoRegLog);
+ break;
+ case LDXR_w:
+ case LDAXR_w:
+ case LDAR_w:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ break;
+ case LDXR_x:
+ case LDAXR_x:
+ case LDAR_x:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ break;
+ case LDXP_w:
+ case LDAXP_w:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ set_wreg(rt2, Read<uint32_t>(address + element_size), NoRegLog);
+ break;
+ case LDXP_x:
+ case LDAXP_x:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ set_xreg(rt2, Read<uint64_t>(address + element_size), NoRegLog);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ if (is_acquire_release) {
+ // Approximate load-acquire by issuing a full barrier after the load.
+ js::jit::AtomicOperations::fenceSeqCst();
+ }
+
+ LogRead(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogRead(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ } else {
+ if (is_acquire_release) {
+ // Approximate store-release by issuing a full barrier before the store.
+ js::jit::AtomicOperations::fenceSeqCst();
+ }
+
+ bool do_store = true;
+ if (is_exclusive) {
+ do_store = local_monitor_.IsExclusive(address, access_size) &&
+ global_monitor_.IsExclusive(address, access_size);
+ set_wreg(rs, do_store ? 0 : 1);
+
+ // - All exclusive stores explicitly clear the local monitor.
+ local_monitor_.Clear();
+ } else {
+ // - Any other store can clear the local monitor as a side effect.
+ local_monitor_.MaybeClear();
+ }
+
+ if (do_store) {
+ switch (op) {
+ case STXRB_w:
+ case STLXRB_w:
+ case STLRB_w:
+ Write<uint8_t>(address, wreg(rt));
+ break;
+ case STXRH_w:
+ case STLXRH_w:
+ case STLRH_w:
+ Write<uint16_t>(address, wreg(rt));
+ break;
+ case STXR_w:
+ case STLXR_w:
+ case STLR_w:
+ Write<uint32_t>(address, wreg(rt));
+ break;
+ case STXR_x:
+ case STLXR_x:
+ case STLR_x:
+ Write<uint64_t>(address, xreg(rt));
+ break;
+ case STXP_w:
+ case STLXP_w:
+ Write<uint32_t>(address, wreg(rt));
+ Write<uint32_t>(address + element_size, wreg(rt2));
+ break;
+ case STXP_x:
+ case STLXP_x:
+ Write<uint64_t>(address, xreg(rt));
+ Write<uint64_t>(address + element_size, xreg(rt2));
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ LogWrite(address, rt, GetPrintRegisterFormatForSize(element_size));
+ if (is_pair) {
+ LogWrite(address + element_size, rt2,
+ GetPrintRegisterFormatForSize(element_size));
+ }
+ }
+ }
+}
+
+
+void Simulator::VisitLoadLiteral(const Instruction* instr) {
+ unsigned rt = instr->Rt();
+ uint64_t address = instr->LiteralAddress<uint64_t>();
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ switch (instr->Mask(LoadLiteralMask)) {
+ // Use NoRegLog to suppress the register trace (LOG_REGS, LOG_VREGS), then
+ // print a more detailed log.
+ case LDR_w_lit:
+ set_wreg(rt, Read<uint32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+ case LDR_x_lit:
+ set_xreg(rt, Read<uint64_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintXReg);
+ break;
+ case LDR_s_lit:
+ set_sreg(rt, Read<float>(address), NoRegLog);
+ LogVRead(address, rt, kPrintSReg);
+ break;
+ case LDR_d_lit:
+ set_dreg(rt, Read<double>(address), NoRegLog);
+ LogVRead(address, rt, kPrintDReg);
+ break;
+ case LDR_q_lit:
+ set_qreg(rt, Read<qreg_t>(address), NoRegLog);
+ LogVRead(address, rt, kPrintReg1Q);
+ break;
+ case LDRSW_x_lit:
+ set_xreg(rt, Read<int32_t>(address), NoRegLog);
+ LogRead(address, rt, kPrintWReg);
+ break;
+
+ // Ignore prfm hint instructions.
+ case PRFM_lit: break;
+
+ default: VIXL_UNREACHABLE();
+ }
+
+ local_monitor_.MaybeClear();
+}
+
+
+uintptr_t Simulator::AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode) {
+ uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
+
+ if ((addr_reg == 31) && ((address % 16) != 0)) {
+ // When the base register is SP the stack pointer is required to be
+ // quadword aligned prior to the address calculation and write-backs.
+ // Misalignment will cause a stack alignment fault.
+ VIXL_ALIGNMENT_EXCEPTION();
+ }
+
+ if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
+ VIXL_ASSERT(offset != 0);
+    // Only PreIndex should log the register update here. For PostIndex, the
+    // update will be printed automatically by LogWrittenRegisters _after_ the
+    // memory access itself is logged.
+ RegLogMode log_mode = (addrmode == PreIndex) ? LogRegWrites : NoRegLog;
+ set_xreg(addr_reg, address + offset, log_mode, Reg31IsStackPointer);
+ }
+
+ if ((addrmode == Offset) || (addrmode == PreIndex)) {
+ address += offset;
+ }
+
+ // Verify that the calculated address is available to the host.
+ VIXL_ASSERT(address == static_cast<uintptr_t>(address));
+
+ return static_cast<uintptr_t>(address);
+}
+
+
+void Simulator::VisitMoveWideImmediate(const Instruction* instr) {
+ MoveWideImmediateOp mov_op =
+ static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
+ int64_t new_xn_val = 0;
+
+ bool is_64_bits = instr->SixtyFourBits() == 1;
+ // Shift is limited for W operations.
+ VIXL_ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+
+ // Get the shifted immediate.
+ int64_t shift = instr->ShiftMoveWide() * 16;
+ int64_t shifted_imm16 = static_cast<int64_t>(instr->ImmMoveWide()) << shift;
+
+ // Compute the new value.
+ switch (mov_op) {
+ case MOVN_w:
+ case MOVN_x: {
+ new_xn_val = ~shifted_imm16;
+ if (!is_64_bits) new_xn_val &= kWRegMask;
+ break;
+ }
+ case MOVK_w:
+ case MOVK_x: {
+ unsigned reg_code = instr->Rd();
+ int64_t prev_xn_val = is_64_bits ? xreg(reg_code)
+ : wreg(reg_code);
+ new_xn_val =
+ (prev_xn_val & ~(INT64_C(0xffff) << shift)) | shifted_imm16;
+ break;
+ }
+ case MOVZ_w:
+ case MOVZ_x: {
+ new_xn_val = shifted_imm16;
+ break;
+ }
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ // Update the destination register.
+ set_xreg(instr->Rd(), new_xn_val);
+}
+
+
+void Simulator::VisitConditionalSelect(const Instruction* instr) {
+ uint64_t new_val = xreg(instr->Rn());
+
+ if (ConditionFailed(static_cast<Condition>(instr->Condition()))) {
+ new_val = xreg(instr->Rm());
+ switch (instr->Mask(ConditionalSelectMask)) {
+ case CSEL_w:
+ case CSEL_x: break;
+ case CSINC_w:
+ case CSINC_x: new_val++; break;
+ case CSINV_w:
+ case CSINV_x: new_val = ~new_val; break;
+ case CSNEG_w:
+ case CSNEG_x: new_val = -new_val; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ set_reg(reg_size, instr->Rd(), new_val);
+}
+
+
+void Simulator::VisitDataProcessing1Source(const Instruction* instr) {
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ switch (instr->Mask(DataProcessing1SourceMask)) {
+ case RBIT_w: set_wreg(dst, ReverseBits(wreg(src))); break;
+ case RBIT_x: set_xreg(dst, ReverseBits(xreg(src))); break;
+ case REV16_w: set_wreg(dst, ReverseBytes(wreg(src), 1)); break;
+ case REV16_x: set_xreg(dst, ReverseBytes(xreg(src), 1)); break;
+ case REV_w: set_wreg(dst, ReverseBytes(wreg(src), 2)); break;
+ case REV32_x: set_xreg(dst, ReverseBytes(xreg(src), 2)); break;
+ case REV_x: set_xreg(dst, ReverseBytes(xreg(src), 3)); break;
+ case CLZ_w: set_wreg(dst, CountLeadingZeros(wreg(src))); break;
+ case CLZ_x: set_xreg(dst, CountLeadingZeros(xreg(src))); break;
+ case CLS_w: {
+ set_wreg(dst, CountLeadingSignBits(wreg(src)));
+ break;
+ }
+ case CLS_x: {
+ set_xreg(dst, CountLeadingSignBits(xreg(src)));
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
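+// Carry-less (GF(2)) reduction: fold the top (n - 32) bits of 'data' into the
+// lower bits using 'poly' and return the 32-bit remainder.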
+uint32_t Simulator::Poly32Mod2(unsigned n, uint64_t data, uint32_t poly) {
+ VIXL_ASSERT((n > 32) && (n <= 64));
+ for (unsigned i = (n - 1); i >= 32; i--) {
+ if (((data >> i) & 1) != 0) {
+ uint64_t polysh32 = (uint64_t)poly << (i - 32);
+ uint64_t mask = (UINT64_C(1) << i) - 1;
+ data = ((data & mask) ^ polysh32);
+ }
+ }
+ return data & 0xffffffff;
+}
+
+
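+// The CRC32 and CRC32C instructions use the reflected (LSB-first) bit order,
+// so both the accumulator and the new data are bit-reversed before the
+// polynomial reduction, and the result is bit-reversed back.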
+template <typename T>
+uint32_t Simulator::Crc32Checksum(uint32_t acc, T val, uint32_t poly) {
+ unsigned size = sizeof(val) * 8; // Number of bits in type T.
+ VIXL_ASSERT((size == 8) || (size == 16) || (size == 32));
+ uint64_t tempacc = static_cast<uint64_t>(ReverseBits(acc)) << size;
+ uint64_t tempval = static_cast<uint64_t>(ReverseBits(val)) << 32;
+ return ReverseBits(Poly32Mod2(32 + size, tempacc ^ tempval, poly));
+}
+
+
+uint32_t Simulator::Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly) {
+ // Poly32Mod2 cannot handle inputs with more than 32 bits, so compute
+ // the CRC of each 32-bit word sequentially.
+ acc = Crc32Checksum(acc, (uint32_t)(val & 0xffffffff), poly);
+ return Crc32Checksum(acc, (uint32_t)(val >> 32), poly);
+}
+
+
+void Simulator::VisitDataProcessing2Source(const Instruction* instr) {
+ Shift shift_op = NO_SHIFT;
+ int64_t result = 0;
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ switch (instr->Mask(DataProcessing2SourceMask)) {
+ case SDIV_w: {
+ int32_t rn = wreg(instr->Rn());
+ int32_t rm = wreg(instr->Rm());
+ if ((rn == kWMinInt) && (rm == -1)) {
+ result = kWMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case SDIV_x: {
+ int64_t rn = xreg(instr->Rn());
+ int64_t rm = xreg(instr->Rm());
+ if ((rn == kXMinInt) && (rm == -1)) {
+ result = kXMinInt;
+ } else if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_w: {
+ uint32_t rn = static_cast<uint32_t>(wreg(instr->Rn()));
+ uint32_t rm = static_cast<uint32_t>(wreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case UDIV_x: {
+ uint64_t rn = static_cast<uint64_t>(xreg(instr->Rn()));
+ uint64_t rm = static_cast<uint64_t>(xreg(instr->Rm()));
+ if (rm == 0) {
+ // Division by zero can be trapped, but not on A-class processors.
+ result = 0;
+ } else {
+ result = rn / rm;
+ }
+ break;
+ }
+ case LSLV_w:
+ case LSLV_x: shift_op = LSL; break;
+ case LSRV_w:
+ case LSRV_x: shift_op = LSR; break;
+ case ASRV_w:
+ case ASRV_x: shift_op = ASR; break;
+ case RORV_w:
+ case RORV_x: shift_op = ROR; break;
+ case CRC32B: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32H: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32W: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ break;
+ }
+ case CRC32X: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ case CRC32CB: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint8_t val = reg<uint8_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CH: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint16_t val = reg<uint16_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CW: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint32_t val = reg<uint32_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ break;
+ }
+ case CRC32CX: {
+ uint32_t acc = reg<uint32_t>(instr->Rn());
+ uint64_t val = reg<uint64_t>(instr->Rm());
+ result = Crc32Checksum(acc, val, CRC32C_POLY);
+ reg_size = kWRegSize;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (shift_op != NO_SHIFT) {
+    // The shift distance is encoded in the least-significant five (W) or six
+    // (X) bits of the register.
+ int mask = (instr->SixtyFourBits() == 1) ? 0x3f : 0x1f;
+ unsigned shift = wreg(instr->Rm()) & mask;
+ result = ShiftOperand(reg_size, reg(reg_size, instr->Rn()), shift_op,
+ shift);
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+// The algorithm used is adapted from the one described in section 8.2 of
+// Hacker's Delight, by Henry S. Warren, Jr.
+// It assumes that a right shift on a signed integer is an arithmetic shift.
+// Type T must be either uint64_t or int64_t.
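+// Each operand is split into 32-bit halves; the four partial products are
+// combined with carry propagation so that only the high 64 bits of the full
+// 128-bit product are returned.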
+template <typename T>
+static T MultiplyHigh(T u, T v) {
+ uint64_t u0, v0, w0;
+ T u1, v1, w1, w2, t;
+
+ VIXL_ASSERT(sizeof(u) == sizeof(u0));
+
+ u0 = u & 0xffffffff;
+ u1 = u >> 32;
+ v0 = v & 0xffffffff;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xffffffff;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+
+void Simulator::VisitDataProcessing3Source(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+
+ int64_t result = 0;
+ // Extract and sign- or zero-extend 32-bit arguments for widening operations.
+ uint64_t rn_u32 = reg<uint32_t>(instr->Rn());
+ uint64_t rm_u32 = reg<uint32_t>(instr->Rm());
+ int64_t rn_s32 = reg<int32_t>(instr->Rn());
+ int64_t rm_s32 = reg<int32_t>(instr->Rm());
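+  // Results are computed in 64-bit arithmetic; the W forms keep only the low
+  // 32 bits when the result is written back via set_reg().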
+ switch (instr->Mask(DataProcessing3SourceMask)) {
+ case MADD_w:
+ case MADD_x:
+ result = xreg(instr->Ra()) + (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case MSUB_w:
+ case MSUB_x:
+ result = xreg(instr->Ra()) - (xreg(instr->Rn()) * xreg(instr->Rm()));
+ break;
+ case SMADDL_x: result = xreg(instr->Ra()) + (rn_s32 * rm_s32); break;
+ case SMSUBL_x: result = xreg(instr->Ra()) - (rn_s32 * rm_s32); break;
+ case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
+ case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
+ case UMULH_x:
+ result = MultiplyHigh(reg<uint64_t>(instr->Rn()),
+ reg<uint64_t>(instr->Rm()));
+ break;
+ case SMULH_x:
+ result = MultiplyHigh(xreg(instr->Rn()), xreg(instr->Rm()));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitBitfield(const Instruction* instr) {
+ unsigned reg_size = instr->SixtyFourBits() ? kXRegSize : kWRegSize;
+ int64_t reg_mask = instr->SixtyFourBits() ? kXRegMask : kWRegMask;
+ int64_t R = instr->ImmR();
+ int64_t S = instr->ImmS();
+ int64_t diff = S - R;
+ int64_t mask;
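+  // Build the bitfield mask: if S >= R the field occupies the low S - R + 1
+  // bits; otherwise the mask wraps around the register width, matching the
+  // rotation applied to the source below.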
+ if (diff >= 0) {
+ mask = (diff < (reg_size - 1)) ? (INT64_C(1) << (diff + 1)) - 1
+ : reg_mask;
+ } else {
+ mask = (INT64_C(1) << (S + 1)) - 1;
+ mask = (static_cast<uint64_t>(mask) >> R) | (mask << (reg_size - R));
+ diff += reg_size;
+ }
+
+  // inzero indicates whether the extracted bitfield is inserted into the
+  // existing destination register value or into zero.
+  // If extend is true, the extracted bitfield is sign-extended.
+ bool inzero = false;
+ bool extend = false;
+ switch (instr->Mask(BitfieldMask)) {
+ case BFM_x:
+ case BFM_w:
+ break;
+ case SBFM_x:
+ case SBFM_w:
+ inzero = true;
+ extend = true;
+ break;
+ case UBFM_x:
+ case UBFM_w:
+ inzero = true;
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+
+ int64_t dst = inzero ? 0 : reg(reg_size, instr->Rd());
+ int64_t src = reg(reg_size, instr->Rn());
+ // Rotate source bitfield into place.
+ int64_t result = (static_cast<uint64_t>(src) >> R) | (src << (reg_size - R));
+ // Determine the sign extension.
+ int64_t topbits = ((INT64_C(1) << (reg_size - diff - 1)) - 1) << (diff + 1);
+ int64_t signbits = extend && ((src >> S) & 1) ? topbits : 0;
+
+ // Merge sign extension, dest/zero and bitfield.
+ result = signbits | (result & mask) | (dst & ~mask);
+
+ set_reg(reg_size, instr->Rd(), result);
+}
+
+
+void Simulator::VisitExtract(const Instruction* instr) {
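+  // Extract a register-sized field from the concatenation Rn:Rm, starting at
+  // bit 'lsb' of Rm (EXTR).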
+ unsigned lsb = instr->ImmS();
+ unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSize
+ : kWRegSize;
+ uint64_t low_res = static_cast<uint64_t>(reg(reg_size, instr->Rm())) >> lsb;
+ uint64_t high_res =
+ (lsb == 0) ? 0 : reg(reg_size, instr->Rn()) << (reg_size - lsb);
+ set_reg(reg_size, instr->Rd(), low_res | high_res);
+}
+
+
+void Simulator::VisitFPImmediate(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dest = instr->Rd();
+ switch (instr->Mask(FPImmediateMask)) {
+ case FMOV_s_imm: set_sreg(dest, instr->ImmFP32()); break;
+ case FMOV_d_imm: set_dreg(dest, instr->ImmFP64()); break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPIntegerConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPIntegerConvertMask)) {
+ case FCVTAS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieAway)); break;
+ case FCVTAS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieAway)); break;
+ case FCVTAS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieAway)); break;
+ case FCVTAS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieAway)); break;
+ case FCVTAU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieAway)); break;
+ case FCVTAU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieAway)); break;
+ case FCVTAU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieAway)); break;
+ case FCVTAU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieAway)); break;
+ case FCVTMS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTMU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPNegativeInfinity));
+ break;
+ case FCVTPS_ws:
+ set_wreg(dst, FPToInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xs:
+ set_xreg(dst, FPToInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_wd:
+ set_wreg(dst, FPToInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPS_xd:
+ set_xreg(dst, FPToInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_ws:
+ set_wreg(dst, FPToUInt32(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xs:
+ set_xreg(dst, FPToUInt64(sreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_wd:
+ set_wreg(dst, FPToUInt32(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTPU_xd:
+ set_xreg(dst, FPToUInt64(dreg(src), FPPositiveInfinity));
+ break;
+ case FCVTNS_ws: set_wreg(dst, FPToInt32(sreg(src), FPTieEven)); break;
+ case FCVTNS_xs: set_xreg(dst, FPToInt64(sreg(src), FPTieEven)); break;
+ case FCVTNS_wd: set_wreg(dst, FPToInt32(dreg(src), FPTieEven)); break;
+ case FCVTNS_xd: set_xreg(dst, FPToInt64(dreg(src), FPTieEven)); break;
+ case FCVTNU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPTieEven)); break;
+ case FCVTNU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPTieEven)); break;
+ case FCVTNU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPTieEven)); break;
+ case FCVTNU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPTieEven)); break;
+ case FCVTZS_ws: set_wreg(dst, FPToInt32(sreg(src), FPZero)); break;
+ case FCVTZS_xs: set_xreg(dst, FPToInt64(sreg(src), FPZero)); break;
+ case FCVTZS_wd: set_wreg(dst, FPToInt32(dreg(src), FPZero)); break;
+ case FCVTZS_xd: set_xreg(dst, FPToInt64(dreg(src), FPZero)); break;
+ case FCVTZU_ws: set_wreg(dst, FPToUInt32(sreg(src), FPZero)); break;
+ case FCVTZU_xs: set_xreg(dst, FPToUInt64(sreg(src), FPZero)); break;
+ case FCVTZU_wd: set_wreg(dst, FPToUInt32(dreg(src), FPZero)); break;
+ case FCVTZU_xd: set_xreg(dst, FPToUInt64(dreg(src), FPZero)); break;
+ case FJCVTZS: set_wreg(dst, FPToFixedJS(dreg(src))); break;
+ case FMOV_ws: set_wreg(dst, sreg_bits(src)); break;
+ case FMOV_xd: set_xreg(dst, dreg_bits(src)); break;
+ case FMOV_sw: set_sreg_bits(dst, wreg(src)); break;
+ case FMOV_dx: set_dreg_bits(dst, xreg(src)); break;
+ case FMOV_d1_x:
+ LogicVRegister(vreg(dst)).SetUint(kFormatD, 1, xreg(src));
+ break;
+ case FMOV_x_d1:
+ set_xreg(dst, LogicVRegister(vreg(src)).Uint(kFormatD, 1));
+ break;
+
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx: set_dreg(dst, FixedToDouble(xreg(src), 0, round)); break;
+ case SCVTF_dw: set_dreg(dst, FixedToDouble(wreg(src), 0, round)); break;
+ case UCVTF_dx: set_dreg(dst, UFixedToDouble(xreg(src), 0, round)); break;
+ case UCVTF_dw: {
+ set_dreg(dst, UFixedToDouble(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+ case SCVTF_sx: set_sreg(dst, FixedToFloat(xreg(src), 0, round)); break;
+ case SCVTF_sw: set_sreg(dst, FixedToFloat(wreg(src), 0, round)); break;
+ case UCVTF_sx: set_sreg(dst, UFixedToFloat(xreg(src), 0, round)); break;
+ case UCVTF_sw: {
+ set_sreg(dst, UFixedToFloat(static_cast<uint32_t>(wreg(src)), 0, round));
+ break;
+ }
+
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPFixedPointConvert(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned dst = instr->Rd();
+ unsigned src = instr->Rn();
+ int fbits = 64 - instr->FPScale();
+
+ FPRounding round = RMode();
+
+ switch (instr->Mask(FPFixedPointConvertMask)) {
+ // A 32-bit input can be handled in the same way as a 64-bit input, since
+ // the sign- or zero-extension will not affect the conversion.
+ case SCVTF_dx_fixed:
+ set_dreg(dst, FixedToDouble(xreg(src), fbits, round));
+ break;
+ case SCVTF_dw_fixed:
+ set_dreg(dst, FixedToDouble(wreg(src), fbits, round));
+ break;
+ case UCVTF_dx_fixed:
+ set_dreg(dst, UFixedToDouble(xreg(src), fbits, round));
+ break;
+ case UCVTF_dw_fixed: {
+ set_dreg(dst,
+ UFixedToDouble(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case SCVTF_sx_fixed:
+ set_sreg(dst, FixedToFloat(xreg(src), fbits, round));
+ break;
+ case SCVTF_sw_fixed:
+ set_sreg(dst, FixedToFloat(wreg(src), fbits, round));
+ break;
+ case UCVTF_sx_fixed:
+ set_sreg(dst, UFixedToFloat(xreg(src), fbits, round));
+ break;
+ case UCVTF_sw_fixed: {
+ set_sreg(dst,
+ UFixedToFloat(static_cast<uint32_t>(wreg(src)), fbits, round));
+ break;
+ }
+ case FCVTZS_xd_fixed:
+ set_xreg(dst, FPToInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_wd_fixed:
+ set_wreg(dst, FPToInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_xd_fixed:
+ set_xreg(dst, FPToUInt64(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZU_wd_fixed:
+ set_wreg(dst, FPToUInt32(dreg(src) * std::pow(2.0, fbits), FPZero));
+ break;
+ case FCVTZS_xs_fixed:
+ set_xreg(dst, FPToInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZS_ws_fixed:
+ set_wreg(dst, FPToInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_xs_fixed:
+ set_xreg(dst, FPToUInt64(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ case FCVTZU_ws_fixed:
+ set_wreg(dst, FPToUInt32(sreg(src) * std::pow(2.0f, fbits), FPZero));
+ break;
+ default: VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPCompareMask)) {
+ case FCMPE_s: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s: FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap); break;
+ case FCMPE_d: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d: FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap); break;
+ case FCMPE_s_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_s_zero: FPCompare(sreg(instr->Rn()), 0.0f, trap); break;
+ case FCMPE_d_zero: trap = EnableTrap; VIXL_FALLTHROUGH();
+ case FCMP_d_zero: FPCompare(dreg(instr->Rn()), 0.0, trap); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalCompare(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPTrapFlags trap = DisableTrap;
+ switch (instr->Mask(FPConditionalCompareMask)) {
+ case FCCMPE_s: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_s:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(sreg(instr->Rn()), sreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ case FCCMPE_d: trap = EnableTrap;
+ VIXL_FALLTHROUGH();
+ case FCCMP_d:
+ if (ConditionPassed(instr->Condition())) {
+ FPCompare(dreg(instr->Rn()), dreg(instr->Rm()), trap);
+ } else {
+ nzcv().SetFlags(instr->Nzcv());
+ LogSystemRegister(NZCV);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPConditionalSelect(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ Instr selected;
+ if (ConditionPassed(instr->Condition())) {
+ selected = instr->Rn();
+ } else {
+ selected = instr->Rm();
+ }
+
+ switch (instr->Mask(FPConditionalSelectMask)) {
+ case FCSEL_s: set_sreg(instr->Rd(), sreg(selected)); break;
+ case FCSEL_d: set_dreg(instr->Rd(), dreg(selected)); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing1Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ bool inexact_exception = false;
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+
+ switch (instr->Mask(FPDataProcessing1SourceMask)) {
+ case FMOV_s: set_sreg(fd, sreg(fn)); return;
+ case FMOV_d: set_dreg(fd, dreg(fn)); return;
+ case FABS_s: fabs_(kFormatS, vreg(fd), vreg(fn)); return;
+ case FABS_d: fabs_(kFormatD, vreg(fd), vreg(fn)); return;
+ case FNEG_s: fneg(kFormatS, vreg(fd), vreg(fn)); return;
+ case FNEG_d: fneg(kFormatD, vreg(fd), vreg(fn)); return;
+ case FCVT_ds:
+ set_dreg(fd, FPToDouble(sreg(fn), ReadDN()));
+ return;
+ case FCVT_sd:
+ set_sreg(fd, FPToFloat(dreg(fn), FPTieEven, ReadDN()));
+ return;
+ case FCVT_hs:
+ set_hreg(fd, Float16ToRawbits(FPToFloat16(sreg(fn), FPTieEven, ReadDN())));
+ return;
+ case FCVT_sh:
+ set_sreg(fd, FPToFloat(RawbitsToFloat16(hreg(fn)), ReadDN()));
+ return;
+ case FCVT_dh:
+ set_dreg(fd, FPToDouble(hreg(fn), ReadDN()));
+ return;
+ case FCVT_hd:
+ set_hreg(fd, Float16ToRawbits(FPToFloat16(dreg(fn), FPTieEven, ReadDN())));
+ return;
+ case FSQRT_s:
+ case FSQRT_d: fsqrt(vform, rd, rn); return;
+ case FRINTI_s:
+ case FRINTI_d: break; // Use FPCR rounding mode.
+ case FRINTX_s:
+ case FRINTX_d: inexact_exception = true; break;
+ case FRINTA_s:
+ case FRINTA_d: fpcr_rounding = FPTieAway; break;
+ case FRINTM_s:
+ case FRINTM_d: fpcr_rounding = FPNegativeInfinity; break;
+ case FRINTN_s:
+ case FRINTN_d: fpcr_rounding = FPTieEven; break;
+ case FRINTP_s:
+ case FRINTP_d: fpcr_rounding = FPPositiveInfinity; break;
+ case FRINTZ_s:
+ case FRINTZ_d: fpcr_rounding = FPZero; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(vform, rd, rn, fpcr_rounding, inexact_exception);
+}
+
+
+void Simulator::VisitFPDataProcessing2Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ VectorFormat vform = (instr->Mask(FP64) == FP64) ? kFormatD : kFormatS;
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(FPDataProcessing2SourceMask)) {
+ case FADD_s:
+ case FADD_d: fadd(vform, rd, rn, rm); break;
+ case FSUB_s:
+ case FSUB_d: fsub(vform, rd, rn, rm); break;
+ case FMUL_s:
+ case FMUL_d: fmul(vform, rd, rn, rm); break;
+ case FNMUL_s:
+ case FNMUL_d: fnmul(vform, rd, rn, rm); break;
+ case FDIV_s:
+ case FDIV_d: fdiv(vform, rd, rn, rm); break;
+ case FMAX_s:
+ case FMAX_d: fmax(vform, rd, rn, rm); break;
+ case FMIN_s:
+ case FMIN_d: fmin(vform, rd, rn, rm); break;
+ case FMAXNM_s:
+ case FMAXNM_d: fmaxnm(vform, rd, rn, rm); break;
+ case FMINNM_s:
+ case FMINNM_d: fminnm(vform, rd, rn, rm); break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+}
+
+
+void Simulator::VisitFPDataProcessing3Source(const Instruction* instr) {
+ AssertSupportedFPCR();
+
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ unsigned fa = instr->Ra();
+
+ switch (instr->Mask(FPDataProcessing3SourceMask)) {
+ // fd = fa +/- (fn * fm)
+ case FMADD_s: set_sreg(fd, FPMulAdd(sreg(fa), sreg(fn), sreg(fm))); break;
+ case FMSUB_s: set_sreg(fd, FPMulAdd(sreg(fa), -sreg(fn), sreg(fm))); break;
+ case FMADD_d: set_dreg(fd, FPMulAdd(dreg(fa), dreg(fn), dreg(fm))); break;
+ case FMSUB_d: set_dreg(fd, FPMulAdd(dreg(fa), -dreg(fn), dreg(fm))); break;
+ // Negated variants of the above.
+ case FNMADD_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), -sreg(fn), sreg(fm)));
+ break;
+ case FNMSUB_s:
+ set_sreg(fd, FPMulAdd(-sreg(fa), sreg(fn), sreg(fm)));
+ break;
+ case FNMADD_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), -dreg(fn), dreg(fm)));
+ break;
+ case FNMSUB_d:
+ set_dreg(fd, FPMulAdd(-dreg(fa), dreg(fn), dreg(fm)));
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+bool Simulator::FPProcessNaNs(const Instruction* instr) {
+ unsigned fd = instr->Rd();
+ unsigned fn = instr->Rn();
+ unsigned fm = instr->Rm();
+ bool done = false;
+
+ if (instr->Mask(FP64) == FP64) {
+ double result = FPProcessNaNs(dreg(fn), dreg(fm));
+ if (std::isnan(result)) {
+ set_dreg(fd, result);
+ done = true;
+ }
+ } else {
+ float result = FPProcessNaNs(sreg(fn), sreg(fm));
+ if (std::isnan(result)) {
+ set_sreg(fd, result);
+ done = true;
+ }
+ }
+
+ return done;
+}
+
+
+void Simulator::SysOp_W(int op, int64_t val) {
+ switch (op) {
+ case IVAU:
+ case CVAC:
+ case CVAU:
+ case CIVAC: {
+ // Perform a dummy memory access to ensure that we have read access
+ // to the specified address.
+ volatile uint8_t y = Read<uint8_t>(val);
+ USE(y);
+ // TODO: Implement "case ZVA:".
+ break;
+ }
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitSystem(const Instruction* instr) {
+ // Some system instructions hijack their Op and Cp fields to represent a
+ // range of immediates instead of indicating a different instruction. This
+ // makes the decoding tricky.
+ if (instr->Mask(SystemExclusiveMonitorFMask) == SystemExclusiveMonitorFixed) {
+ VIXL_ASSERT(instr->Mask(SystemExclusiveMonitorMask) == CLREX);
+ switch (instr->Mask(SystemExclusiveMonitorMask)) {
+ case CLREX: {
+ PrintExclusiveAccessWarning();
+ ClearLocalMonitor();
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemSysRegFMask) == SystemSysRegFixed) {
+ switch (instr->Mask(SystemSysRegMask)) {
+ case MRS: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV: set_xreg(instr->Rt(), nzcv().RawValue()); break;
+ case FPCR: set_xreg(instr->Rt(), fpcr().RawValue()); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ case MSR: {
+ switch (instr->ImmSystemRegister()) {
+ case NZCV:
+ nzcv().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(NZCV);
+ break;
+ case FPCR:
+ fpcr().SetRawValue(wreg(instr->Rt()));
+ LogSystemRegister(FPCR);
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ }
+ } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
+ VIXL_ASSERT(instr->Mask(SystemHintMask) == HINT);
+ switch (instr->ImmHint()) {
+ case NOP: break;
+ case CSDB: break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(MemBarrierFMask) == MemBarrierFixed) {
+ js::jit::AtomicOperations::fenceSeqCst();
+ } else if ((instr->Mask(SystemSysFMask) == SystemSysFixed)) {
+ switch (instr->Mask(SystemSysMask)) {
+ case SYS: SysOp_W(instr->SysOp(), xreg(instr->Rt())); break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitCrypto2RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCrypto3RegSHA(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitCryptoAES(const Instruction* instr) {
+ VisitUnimplemented(instr);
+}
+
+
+void Simulator::VisitNEON2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ static const NEONFormatMap map_lp = {
+ {23, 22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S, NF_1D, NF_2D}
+ };
+ VectorFormat vf_lp = nfd.GetVectorFormat(&map_lp);
+
+ static const NEONFormatMap map_fcvtl = {
+ {22}, {NF_4S, NF_2D}
+ };
+ VectorFormat vf_fcvtl = nfd.GetVectorFormat(&map_fcvtl);
+
+ static const NEONFormatMap map_fcvtn = {
+ {22, 30}, {NF_4H, NF_8H, NF_2S, NF_4S}
+ };
+ VectorFormat vf_fcvtn = nfd.GetVectorFormat(&map_fcvtn);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_opcode) {
+    // These instructions all use a two-bit size field, except NOT and RBIT,
+    // which use the field to encode the operation.
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_REV64: rev64(vf, rd, rn); break;
+ case NEON_REV32: rev32(vf, rd, rn); break;
+ case NEON_REV16: rev16(vf, rd, rn); break;
+ case NEON_SUQADD: suqadd(vf, rd, rn); break;
+ case NEON_USQADD: usqadd(vf, rd, rn); break;
+ case NEON_CLS: cls(vf, rd, rn); break;
+ case NEON_CLZ: clz(vf, rd, rn); break;
+ case NEON_CNT: cnt(vf, rd, rn); break;
+ case NEON_SQABS: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SQNEG: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_CMGT_zero: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMGE_zero: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMEQ_zero: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMLE_zero: cmp(vf, rd, rn, 0, le); break;
+ case NEON_CMLT_zero: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_ABS: abs(vf, rd, rn); break;
+ case NEON_NEG: neg(vf, rd, rn); break;
+ case NEON_SADDLP: saddlp(vf_lp, rd, rn); break;
+ case NEON_UADDLP: uaddlp(vf_lp, rd, rn); break;
+ case NEON_SADALP: sadalp(vf_lp, rd, rn); break;
+ case NEON_UADALP: uadalp(vf_lp, rd, rn); break;
+ case NEON_RBIT_NOT:
+ vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->FPType()) {
+ case 0: not_(vf, rd, rn); break;
+          case 1: rbit(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+ bool inexact_exception = false;
+
+    // These instructions all use a one-bit size field, except XTN, SQXTUN,
+    // SHLL, SQXTN and UQXTN, which use a two-bit size field.
+ switch (instr->Mask(NEON2RegMiscFPMask)) {
+ case NEON_FABS: fabs_(fpf, rd, rn); return;
+ case NEON_FNEG: fneg(fpf, rd, rn); return;
+ case NEON_FSQRT: fsqrt(fpf, rd, rn); return;
+ case NEON_FCVTL:
+ if (instr->Mask(NEON_Q)) {
+ fcvtl2(vf_fcvtl, rd, rn);
+ } else {
+ fcvtl(vf_fcvtl, rd, rn);
+ }
+ return;
+ case NEON_FCVTN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtn(vf_fcvtn, rd, rn);
+ }
+ return;
+ case NEON_FCVTXN:
+ if (instr->Mask(NEON_Q)) {
+ fcvtxn2(vf_fcvtn, rd, rn);
+ } else {
+ fcvtxn(vf_fcvtn, rd, rn);
+ }
+ return;
+
+ // The following instructions break from the switch statement, rather
+ // than return.
+ case NEON_FRINTI: break; // Use FPCR rounding mode.
+ case NEON_FRINTX: inexact_exception = true; break;
+ case NEON_FRINTA: fpcr_rounding = FPTieAway; break;
+ case NEON_FRINTM: fpcr_rounding = FPNegativeInfinity; break;
+ case NEON_FRINTN: fpcr_rounding = FPTieEven; break;
+ case NEON_FRINTP: fpcr_rounding = FPPositiveInfinity; break;
+ case NEON_FRINTZ: fpcr_rounding = FPZero; break;
+
+ case NEON_FCVTNS: fcvts(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTNU: fcvtu(fpf, rd, rn, FPTieEven); return;
+ case NEON_FCVTPS: fcvts(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTPU: fcvtu(fpf, rd, rn, FPPositiveInfinity); return;
+ case NEON_FCVTMS: fcvts(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTMU: fcvtu(fpf, rd, rn, FPNegativeInfinity); return;
+ case NEON_FCVTZS: fcvts(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTZU: fcvtu(fpf, rd, rn, FPZero); return;
+ case NEON_FCVTAS: fcvts(fpf, rd, rn, FPTieAway); return;
+ case NEON_FCVTAU: fcvtu(fpf, rd, rn, FPTieAway); return;
+ case NEON_SCVTF: scvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_UCVTF: ucvtf(fpf, rd, rn, 0, fpcr_rounding); return;
+ case NEON_URSQRTE: ursqrte(fpf, rd, rn); return;
+ case NEON_URECPE: urecpe(fpf, rd, rn); return;
+ case NEON_FRSQRTE: frsqrte(fpf, rd, rn); return;
+ case NEON_FRECPE: frecpe(fpf, rd, rn, fpcr_rounding); return;
+ case NEON_FCMGT_zero: fcmp_zero(fpf, rd, rn, gt); return;
+ case NEON_FCMGE_zero: fcmp_zero(fpf, rd, rn, ge); return;
+ case NEON_FCMEQ_zero: fcmp_zero(fpf, rd, rn, eq); return;
+ case NEON_FCMLE_zero: fcmp_zero(fpf, rd, rn, le); return;
+ case NEON_FCMLT_zero: fcmp_zero(fpf, rd, rn, lt); return;
+ default:
+ if ((NEON_XTN_opcode <= instr->Mask(NEON2RegMiscOpcode)) &&
+ (instr->Mask(NEON2RegMiscOpcode) <= NEON_UQXTN_opcode)) {
+ switch (instr->Mask(NEON2RegMiscMask)) {
+ case NEON_XTN: xtn(vf, rd, rn); return;
+ case NEON_SQXTN: sqxtn(vf, rd, rn); return;
+ case NEON_UQXTN: uqxtn(vf, rd, rn); return;
+ case NEON_SQXTUN: sqxtun(vf, rd, rn); return;
+ case NEON_SHLL:
+ vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+ if (instr->Mask(NEON_Q)) {
+ shll2(vf, rd, rn);
+ } else {
+ shll(vf, rd, rn);
+ }
+ return;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ // Only FRINT* instructions fall through the switch above.
+ frint(fpf, rd, rn, fpcr_rounding, inexact_exception);
+ }
+}
+
+
+void Simulator::VisitNEON3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEON3SameLogicalFMask) == NEON3SameLogicalFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LogicalFormatMap());
+ switch (instr->Mask(NEON3SameLogicalMask)) {
+ case NEON_AND: and_(vf, rd, rn, rm); break;
+ case NEON_ORR: orr(vf, rd, rn, rm); break;
+ case NEON_ORN: orn(vf, rd, rn, rm); break;
+ case NEON_EOR: eor(vf, rd, rn, rm); break;
+ case NEON_BIC: bic(vf, rd, rn, rm); break;
+ case NEON_BIF: bif(vf, rd, rn, rm); break;
+ case NEON_BIT: bit(vf, rd, rn, rm); break;
+ case NEON_BSL: bsl(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else if (instr->Mask(NEON3SameFPFMask) == NEON3SameFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+ switch (instr->Mask(NEON3SameFPMask)) {
+ case NEON_FADD: fadd(vf, rd, rn, rm); break;
+ case NEON_FSUB: fsub(vf, rd, rn, rm); break;
+ case NEON_FMUL: fmul(vf, rd, rn, rm); break;
+ case NEON_FDIV: fdiv(vf, rd, rn, rm); break;
+ case NEON_FMAX: fmax(vf, rd, rn, rm); break;
+ case NEON_FMIN: fmin(vf, rd, rn, rm); break;
+ case NEON_FMAXNM: fmaxnm(vf, rd, rn, rm); break;
+ case NEON_FMINNM: fminnm(vf, rd, rn, rm); break;
+ case NEON_FMLA: fmla(vf, rd, rn, rm); break;
+ case NEON_FMLS: fmls(vf, rd, rn, rm); break;
+ case NEON_FMULX: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD: fabd(vf, rd, rn, rm); break;
+ case NEON_FADDP: faddp(vf, rd, rn, rm); break;
+ case NEON_FMAXP: fmaxp(vf, rd, rn, rm); break;
+ case NEON_FMAXNMP: fmaxnmp(vf, rd, rn, rm); break;
+ case NEON_FMINP: fminp(vf, rd, rn, rm); break;
+ case NEON_FMINNMP: fminnmp(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+ switch (instr->Mask(NEON3SameMask)) {
+ case NEON_ADD: add(vf, rd, rn, rm); break;
+ case NEON_ADDP: addp(vf, rd, rn, rm); break;
+ case NEON_CMEQ: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST: cmptst(vf, rd, rn, rm); break;
+ case NEON_MLS: mls(vf, rd, rn, rm); break;
+ case NEON_MLA: mla(vf, rd, rn, rm); break;
+ case NEON_MUL: mul(vf, rd, rn, rm); break;
+ case NEON_PMUL: pmul(vf, rd, rn, rm); break;
+ case NEON_SMAX: smax(vf, rd, rn, rm); break;
+ case NEON_SMAXP: smaxp(vf, rd, rn, rm); break;
+ case NEON_SMIN: smin(vf, rd, rn, rm); break;
+ case NEON_SMINP: sminp(vf, rd, rn, rm); break;
+ case NEON_SUB: sub(vf, rd, rn, rm); break;
+ case NEON_UMAX: umax(vf, rd, rn, rm); break;
+ case NEON_UMAXP: umaxp(vf, rd, rn, rm); break;
+ case NEON_UMIN: umin(vf, rd, rn, rm); break;
+ case NEON_UMINP: uminp(vf, rd, rn, rm); break;
+ case NEON_SSHL: sshl(vf, rd, rn, rm); break;
+ case NEON_USHL: ushl(vf, rd, rn, rm); break;
+ case NEON_SABD: absdiff(vf, rd, rn, rm, true); break;
+ case NEON_UABD: absdiff(vf, rd, rn, rm, false); break;
+ case NEON_SABA: saba(vf, rd, rn, rm); break;
+ case NEON_UABA: uaba(vf, rd, rn, rm); break;
+ case NEON_UQADD: add(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQADD: add(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_UQSUB: sub(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSUB: sub(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_SQDMULH: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQSHL: ushl(vf, rd, rn, rm).UnsignedSaturate(vf); break;
+ case NEON_SQSHL: sshl(vf, rd, rn, rm).SignedSaturate(vf); break;
+ case NEON_URSHL: ushl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_SRSHL: sshl(vf, rd, rn, rm).Round(vf); break;
+ case NEON_UQRSHL:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ case NEON_UHADD:
+ add(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_URHADD:
+ add(vf, rd, rn, rm).Uhalve(vf).Round(vf);
+ break;
+ case NEON_SHADD:
+ add(vf, rd, rn, rm).Halve(vf);
+ break;
+ case NEON_SRHADD:
+ add(vf, rd, rn, rm).Halve(vf).Round(vf);
+ break;
+ case NEON_UHSUB:
+ sub(vf, rd, rn, rm).Uhalve(vf);
+ break;
+ case NEON_SHSUB:
+ sub(vf, rd, rn, rm).Halve(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEON3Different(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_l = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEON3DifferentMask)) {
+ case NEON_PMULL: pmull(vf_l, rd, rn, rm); break;
+ case NEON_PMULL2: pmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDL: uaddl(vf_l, rd, rn, rm); break;
+ case NEON_UADDL2: uaddl2(vf_l, rd, rn, rm); break;
+ case NEON_SADDL: saddl(vf_l, rd, rn, rm); break;
+ case NEON_SADDL2: saddl2(vf_l, rd, rn, rm); break;
+ case NEON_USUBL: usubl(vf_l, rd, rn, rm); break;
+ case NEON_USUBL2: usubl2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL: ssubl(vf_l, rd, rn, rm); break;
+ case NEON_SSUBL2: ssubl2(vf_l, rd, rn, rm); break;
+ case NEON_SABAL: sabal(vf_l, rd, rn, rm); break;
+ case NEON_SABAL2: sabal2(vf_l, rd, rn, rm); break;
+ case NEON_UABAL: uabal(vf_l, rd, rn, rm); break;
+ case NEON_UABAL2: uabal2(vf_l, rd, rn, rm); break;
+ case NEON_SABDL: sabdl(vf_l, rd, rn, rm); break;
+ case NEON_SABDL2: sabdl2(vf_l, rd, rn, rm); break;
+ case NEON_UABDL: uabdl(vf_l, rd, rn, rm); break;
+ case NEON_UABDL2: uabdl2(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL: smlal(vf_l, rd, rn, rm); break;
+ case NEON_SMLAL2: smlal2(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL: umlal(vf_l, rd, rn, rm); break;
+ case NEON_UMLAL2: umlal2(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL: smlsl(vf_l, rd, rn, rm); break;
+ case NEON_SMLSL2: smlsl2(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL: umlsl(vf_l, rd, rn, rm); break;
+ case NEON_UMLSL2: umlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SMULL: smull(vf_l, rd, rn, rm); break;
+ case NEON_SMULL2: smull2(vf_l, rd, rn, rm); break;
+ case NEON_UMULL: umull(vf_l, rd, rn, rm); break;
+ case NEON_UMULL2: umull2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL: sqdmlal(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLAL2: sqdmlal2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL: sqdmlsl(vf_l, rd, rn, rm); break;
+ case NEON_SQDMLSL2: sqdmlsl2(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL: sqdmull(vf_l, rd, rn, rm); break;
+ case NEON_SQDMULL2: sqdmull2(vf_l, rd, rn, rm); break;
+ case NEON_UADDW: uaddw(vf_l, rd, rn, rm); break;
+ case NEON_UADDW2: uaddw2(vf_l, rd, rn, rm); break;
+ case NEON_SADDW: saddw(vf_l, rd, rn, rm); break;
+ case NEON_SADDW2: saddw2(vf_l, rd, rn, rm); break;
+ case NEON_USUBW: usubw(vf_l, rd, rn, rm); break;
+ case NEON_USUBW2: usubw2(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW: ssubw(vf_l, rd, rn, rm); break;
+ case NEON_SSUBW2: ssubw2(vf_l, rd, rn, rm); break;
+ case NEON_ADDHN: addhn(vf, rd, rn, rm); break;
+ case NEON_ADDHN2: addhn2(vf, rd, rn, rm); break;
+ case NEON_RADDHN: raddhn(vf, rd, rn, rm); break;
+ case NEON_RADDHN2: raddhn2(vf, rd, rn, rm); break;
+ case NEON_SUBHN: subhn(vf, rd, rn, rm); break;
+ case NEON_SUBHN2: subhn2(vf, rd, rn, rm); break;
+ case NEON_RSUBHN: rsubhn(vf, rd, rn, rm); break;
+ case NEON_RSUBHN2: rsubhn2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONAcrossLanes(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ // The input operand's VectorFormat is passed for these instructions.
+ if (instr->Mask(NEONAcrossLanesFPFMask) == NEONAcrossLanesFPFixed) {
+ VectorFormat vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONAcrossLanesFPMask)) {
+ case NEON_FMAXV: fmaxv(vf, rd, rn); break;
+ case NEON_FMINV: fminv(vf, rd, rn); break;
+ case NEON_FMAXNMV: fmaxnmv(vf, rd, rn); break;
+ case NEON_FMINNMV: fminnmv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ switch (instr->Mask(NEONAcrossLanesMask)) {
+ case NEON_ADDV: addv(vf, rd, rn); break;
+ case NEON_SMAXV: smaxv(vf, rd, rn); break;
+ case NEON_SMINV: sminv(vf, rd, rn); break;
+ case NEON_UMAXV: umaxv(vf, rd, rn); break;
+ case NEON_UMINV: uminv(vf, rd, rn); break;
+ case NEON_SADDLV: saddlv(vf, rd, rn); break;
+ case NEON_UADDLV: uaddlv(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf_r = nfd.GetVectorFormat();
+ VectorFormat vf = nfd.GetVectorFormat(nfd.LongIntegerFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
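+  // Select the operation as a pointer-to-member function and dispatch it once
+  // at the end, after the element index and Rm register number have been
+  // decoded.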
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONByIndexedElementMask)) {
+ case NEON_MUL_byelement: Op = &Simulator::mul; vf = vf_r; break;
+ case NEON_MLA_byelement: Op = &Simulator::mla; vf = vf_r; break;
+ case NEON_MLS_byelement: Op = &Simulator::mls; vf = vf_r; break;
+ case NEON_SQDMULH_byelement: Op = &Simulator::sqdmulh; vf = vf_r; break;
+ case NEON_SQRDMULH_byelement: Op = &Simulator::sqrdmulh; vf = vf_r; break;
+ case NEON_SMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smull2;
+ } else {
+ Op = &Simulator::smull;
+ }
+ break;
+ case NEON_UMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umull2;
+ } else {
+ Op = &Simulator::umull;
+ }
+ break;
+ case NEON_SMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlal2;
+ } else {
+ Op = &Simulator::smlal;
+ }
+ break;
+ case NEON_UMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlal2;
+ } else {
+ Op = &Simulator::umlal;
+ }
+ break;
+ case NEON_SMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::smlsl2;
+ } else {
+ Op = &Simulator::smlsl;
+ }
+ break;
+ case NEON_UMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::umlsl2;
+ } else {
+ Op = &Simulator::umlsl;
+ }
+ break;
+ case NEON_SQDMULL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmull2;
+ } else {
+ Op = &Simulator::sqdmull;
+ }
+ break;
+ case NEON_SQDMLAL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlal2;
+ } else {
+ Op = &Simulator::sqdmlal;
+ }
+ break;
+ case NEON_SQDMLSL_byelement:
+ if (instr->Mask(NEON_Q)) {
+ Op = &Simulator::sqdmlsl2;
+ } else {
+ Op = &Simulator::sqdmlsl;
+ }
+ break;
+ default:
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+
+ vf = nfd.GetVectorFormat(nfd.FPFormatMap());
+
+ switch (instr->Mask(NEONByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
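+  // imm5 encodes the element size (the position of its lowest set bit) and
+  // the element index (the bits above that).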
+ int imm5 = instr->ImmNEON5();
+ int tz = CountTrailingZeros(imm5, 32);
+ int reg_index = imm5 >> (tz + 1);
+
+ if (instr->Mask(NEONCopyInsElementMask) == NEON_INS_ELEMENT) {
+ int imm4 = instr->ImmNEON4();
+ int rn_index = imm4 >> tz;
+ ins_element(vf, rd, reg_index, rn, rn_index);
+ } else if (instr->Mask(NEONCopyInsGeneralMask) == NEON_INS_GENERAL) {
+ ins_immediate(vf, rd, reg_index, xreg(instr->Rn()));
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_UMOV) {
+ uint64_t value = LogicVRegister(rn).Uint(vf, reg_index);
+ value &= MaxUintFromFormat(vf);
+ set_xreg(instr->Rd(), value);
+ } else if (instr->Mask(NEONCopyUmovMask) == NEON_SMOV) {
+ int64_t value = LogicVRegister(rn).Int(vf, reg_index);
+ if (instr->NEONQ()) {
+ set_xreg(instr->Rd(), value);
+ } else {
+ set_wreg(instr->Rd(), (int32_t)value);
+ }
+ } else if (instr->Mask(NEONCopyDupElementMask) == NEON_DUP_ELEMENT) {
+ dup_element(vf, rd, rn, reg_index);
+ } else if (instr->Mask(NEONCopyDupGeneralMask) == NEON_DUP_GENERAL) {
+ dup_immediate(vf, rd, xreg(instr->Rn()));
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONExtract(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ if (instr->Mask(NEONExtractMask) == NEON_EXT) {
+ int index = instr->ImmNEONExt();
+ ext(vf, rd, rn, rm, index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ uint64_t addr_base = xreg(instr->Rn(), Reg31IsStackPointer);
+ int reg_size = RegisterSizeInBytesFromFormat(vf);
+
+ int reg[4];
+ uint64_t addr[4];
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (instr->Rt() + i) % kNumberOfVRegisters;
+ addr[i] = addr_base + (i * reg_size);
+ }
+ int count = 1;
+ bool log_read = true;
+
+ Instr itype = instr->Mask(NEONLoadStoreMultiStructMask);
+ if (((itype == NEON_LD1_1v) || (itype == NEON_LD1_2v) ||
+ (itype == NEON_LD1_3v) || (itype == NEON_LD1_4v) ||
+ (itype == NEON_ST1_1v) || (itype == NEON_ST1_2v) ||
+ (itype == NEON_ST1_3v) || (itype == NEON_ST1_4v)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ switch (instr->Mask(NEONLoadStoreMultiStructPostIndexMask)) {
+ case NEON_LD1_4v:
+ case NEON_LD1_4v_post: ld1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_3v:
+ case NEON_LD1_3v_post: ld1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_2v:
+ case NEON_LD1_2v_post: ld1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_LD1_1v:
+ case NEON_LD1_1v_post:
+ ld1(vf, vreg(reg[0]), addr[0]);
+ log_read = true;
+ break;
+ case NEON_ST1_4v:
+ case NEON_ST1_4v_post: st1(vf, vreg(reg[3]), addr[3]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_3v:
+ case NEON_ST1_3v_post: st1(vf, vreg(reg[2]), addr[2]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_2v:
+ case NEON_ST1_2v_post: st1(vf, vreg(reg[1]), addr[1]); count++;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_1v:
+ case NEON_ST1_1v_post:
+ st1(vf, vreg(reg[0]), addr[0]);
+ log_read = false;
+ break;
+ case NEON_LD2_post:
+ case NEON_LD2:
+ ld2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_ST2:
+ case NEON_ST2_post:
+ st2(vf, vreg(reg[0]), vreg(reg[1]), addr[0]);
+ count = 2;
+ break;
+ case NEON_LD3_post:
+ case NEON_LD3:
+ ld3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST3:
+ case NEON_ST3_post:
+ st3(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), addr[0]);
+ count = 3;
+ break;
+ case NEON_ST4:
+ case NEON_ST4_post:
+ st4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ case NEON_LD4_post:
+ case NEON_LD4:
+ ld4(vf, vreg(reg[0]), vreg(reg[1]), vreg(reg[2]), vreg(reg[3]),
+ addr[0]);
+ count = 4;
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ // Explicitly log the register update whilst we have type information.
+ for (int i = 0; i < count; i++) {
+ // For de-interleaving loads, only print the base address.
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ PrintRegisterFormat format = GetPrintRegisterFormatTryFP(
+ GetPrintRegisterFormatForSize(reg_size, lane_size));
+ if (log_read) {
+ LogVRead(addr_base, reg[i], format);
+ } else {
+ LogVWrite(addr_base, reg[i], format);
+ }
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ // The immediate post index addressing mode is indicated by rm = 31.
+ // The immediate is implied by the number of vector registers used.
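+    // For example, "ld1 {v0.16b, v1.16b}, [x0], #32" has count == 2 and a
+    // 16-byte register size, so the implied post-index immediate is 32.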
+ addr_base += (rm == 31) ? RegisterSizeInBytesFromFormat(vf) * count
+ : xreg(rm);
+ set_xreg(instr->Rn(), addr_base);
+ } else {
+ VIXL_ASSERT(addr_mode == Offset);
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStruct(const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreMultiStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreMultiStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode) {
+ uint64_t addr = xreg(instr->Rn(), Reg31IsStackPointer);
+ int rt = instr->Rt();
+
+ Instr itype = instr->Mask(NEONLoadStoreSingleStructMask);
+ if (((itype == NEON_LD1_b) || (itype == NEON_LD1_h) ||
+ (itype == NEON_LD1_s) || (itype == NEON_LD1_d)) &&
+ (instr->Bits(20, 16) != 0)) {
+ VIXL_UNREACHABLE();
+ }
+
+ // We use the PostIndex mask here, as it works in this case for both Offset
+ // and PostIndex addressing.
+ bool do_load = false;
+
+ bool replicating = false;
+
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LoadStoreFormatMap());
+ VectorFormat vf_t = nfd.GetVectorFormat();
+
+ VectorFormat vf = kFormat16B;
+ switch (instr->Mask(NEONLoadStoreSingleStructPostIndexMask)) {
+ case NEON_LD1_b:
+ case NEON_LD1_b_post:
+ case NEON_LD2_b:
+ case NEON_LD2_b_post:
+ case NEON_LD3_b:
+ case NEON_LD3_b_post:
+ case NEON_LD4_b:
+ case NEON_LD4_b_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_b:
+ case NEON_ST1_b_post:
+ case NEON_ST2_b:
+ case NEON_ST2_b_post:
+ case NEON_ST3_b:
+ case NEON_ST3_b_post:
+ case NEON_ST4_b:
+ case NEON_ST4_b_post: break;
+
+ case NEON_LD1_h:
+ case NEON_LD1_h_post:
+ case NEON_LD2_h:
+ case NEON_LD2_h_post:
+ case NEON_LD3_h:
+ case NEON_LD3_h_post:
+ case NEON_LD4_h:
+ case NEON_LD4_h_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_h:
+ case NEON_ST1_h_post:
+ case NEON_ST2_h:
+ case NEON_ST2_h_post:
+ case NEON_ST3_h:
+ case NEON_ST3_h_post:
+ case NEON_ST4_h:
+ case NEON_ST4_h_post: vf = kFormat8H; break;
+ case NEON_LD1_s:
+ case NEON_LD1_s_post:
+ case NEON_LD2_s:
+ case NEON_LD2_s_post:
+ case NEON_LD3_s:
+ case NEON_LD3_s_post:
+ case NEON_LD4_s:
+ case NEON_LD4_s_post: do_load = true;
+ VIXL_FALLTHROUGH();
+ case NEON_ST1_s:
+ case NEON_ST1_s_post:
+ case NEON_ST2_s:
+ case NEON_ST2_s_post:
+ case NEON_ST3_s:
+ case NEON_ST3_s_post:
+ case NEON_ST4_s:
+ case NEON_ST4_s_post: {
+ VIXL_STATIC_ASSERT((NEON_LD1_s | (1 << NEONLSSize_offset)) == NEON_LD1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_LD1_s_post | (1 << NEONLSSize_offset)) == NEON_LD1_d_post);
+ VIXL_STATIC_ASSERT((NEON_ST1_s | (1 << NEONLSSize_offset)) == NEON_ST1_d);
+ VIXL_STATIC_ASSERT(
+ (NEON_ST1_s_post | (1 << NEONLSSize_offset)) == NEON_ST1_d_post);
+ vf = ((instr->NEONLSSize() & 1) == 0) ? kFormat4S : kFormat2D;
+ break;
+ }
+
+ case NEON_LD1R:
+ case NEON_LD1R_post:
+ case NEON_LD2R:
+ case NEON_LD2R_post:
+ case NEON_LD3R:
+ case NEON_LD3R_post:
+ case NEON_LD4R:
+ case NEON_LD4R_post: {
+ vf = vf_t;
+ do_load = true;
+ replicating = true;
+ break;
+ }
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ PrintRegisterFormat print_format =
+ GetPrintRegisterFormatTryFP(GetPrintRegisterFormat(vf));
+ // Make sure that the print_format only includes a single lane.
+ print_format =
+ static_cast<PrintRegisterFormat>(print_format & ~kPrintRegAsVectorMask);
+
+ int esize = LaneSizeInBytesFromFormat(vf);
+ int index_shift = LaneSizeInBytesLog2FromFormat(vf);
+ int lane = instr->NEONLSIndex(index_shift);
+ int scale = 0;
+ int rt2 = (rt + 1) % kNumberOfVRegisters;
+ int rt3 = (rt2 + 1) % kNumberOfVRegisters;
+ int rt4 = (rt3 + 1) % kNumberOfVRegisters;
+ switch (instr->Mask(NEONLoadStoreSingleLenMask)) {
+ case NEONLoadStoreSingle1:
+ scale = 1;
+ if (do_load) {
+ if (replicating) {
+ ld1r(vf, vreg(rt), addr);
+ } else {
+ ld1(vf, vreg(rt), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ } else {
+ st1(vf, vreg(rt), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle2:
+ scale = 2;
+ if (do_load) {
+ if (replicating) {
+ ld2r(vf, vreg(rt), vreg(rt2), addr);
+ } else {
+ ld2(vf, vreg(rt), vreg(rt2), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ } else {
+ st2(vf, vreg(rt), vreg(rt2), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle3:
+ scale = 3;
+ if (do_load) {
+ if (replicating) {
+ ld3r(vf, vreg(rt), vreg(rt2), vreg(rt3), addr);
+ } else {
+ ld3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ } else {
+ st3(vf, vreg(rt), vreg(rt2), vreg(rt3), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ }
+ break;
+ case NEONLoadStoreSingle4:
+ scale = 4;
+ if (do_load) {
+ if (replicating) {
+ ld4r(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), addr);
+ } else {
+ ld4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ }
+ LogVRead(addr, rt, print_format, lane);
+ LogVRead(addr + esize, rt2, print_format, lane);
+ LogVRead(addr + (2 * esize), rt3, print_format, lane);
+ LogVRead(addr + (3 * esize), rt4, print_format, lane);
+ } else {
+ st4(vf, vreg(rt), vreg(rt2), vreg(rt3), vreg(rt4), lane, addr);
+ LogVWrite(addr, rt, print_format, lane);
+ LogVWrite(addr + esize, rt2, print_format, lane);
+ LogVWrite(addr + (2 * esize), rt3, print_format, lane);
+ LogVWrite(addr + (3 * esize), rt4, print_format, lane);
+ }
+ break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+
+ if (addr_mode == PostIndex) {
+ int rm = instr->Rm();
+ int lane_size = LaneSizeInBytesFromFormat(vf);
+ set_xreg(instr->Rn(), addr + ((rm == 31) ? (scale * lane_size) : xreg(rm)));
+ }
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStruct(const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, Offset);
+}
+
+
+void Simulator::VisitNEONLoadStoreSingleStructPostIndex(
+ const Instruction* instr) {
+ NEONLoadStoreSingleStructHelper(instr, PostIndex);
+}
+
+
+void Simulator::VisitNEONModifiedImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ int cmode = instr->NEONCmode();
+ int cmode_3_1 = (cmode >> 1) & 7;
+ int cmode_3 = (cmode >> 3) & 1;
+ int cmode_2 = (cmode >> 2) & 1;
+ int cmode_1 = (cmode >> 1) & 1;
+ int cmode_0 = cmode & 1;
+ int q = instr->NEONQ();
+ int op_bit = instr->NEONModImmOp();
+ uint64_t imm8 = instr->ImmNEONabcdefgh();
+
+ // Find the format and immediate value
+ uint64_t imm = 0;
+ VectorFormat vform = kFormatUndefined;
+ switch (cmode_3_1) {
+ case 0x0:
+ case 0x1:
+ case 0x2:
+ case 0x3:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ imm = imm8 << (8 * cmode_3_1);
+ break;
+ case 0x4:
+ case 0x5:
+ vform = (q == 1) ? kFormat8H : kFormat4H;
+ imm = imm8 << (8 * cmode_1);
+ break;
+ case 0x6:
+ vform = (q == 1) ? kFormat4S : kFormat2S;
+ if (cmode_0 == 0) {
+ imm = imm8 << 8 | 0x000000ff;
+ } else {
+ imm = imm8 << 16 | 0x0000ffff;
+ }
+ break;
+ case 0x7:
+ if (cmode_0 == 0 && op_bit == 0) {
+ vform = q ? kFormat16B : kFormat8B;
+ imm = imm8;
+ } else if (cmode_0 == 0 && op_bit == 1) {
+ vform = q ? kFormat2D : kFormat1D;
+ imm = 0;
+ for (int i = 0; i < 8; ++i) {
+ if (imm8 & (1ULL << i)) {
+ imm |= (UINT64_C(0xff) << (8 * i));
+ }
+ }
+ } else { // cmode_0 == 1, cmode == 0xf.
+ if (op_bit == 0) {
+ vform = q ? kFormat4S : kFormat2S;
+ imm = FloatToRawbits(instr->ImmNEONFP32());
+ } else if (q == 1) {
+ vform = kFormat2D;
+ imm = DoubleToRawbits(instr->ImmNEONFP64());
+ } else {
+ VIXL_ASSERT((q == 0) && (op_bit == 1) && (cmode == 0xf));
+ VisitUnallocated(instr);
+ }
+ }
+ break;
+ default: VIXL_UNREACHABLE(); break;
+ }
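+  // Worked example (illustrative): q = 1, cmode = 0b1100 and imm8 = 0xab give
+  // cmode_3_1 = 0x6 and cmode_0 = 0, so vform is 4S and the per-lane
+  // immediate expands to (0xab << 8) | 0xff = 0x0000abff ("shifted ones").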
+
+ // Find the operation
+ NEONModifiedImmediateOp op;
+ if (cmode_3 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<3> == '1'
+ if (cmode_2 == 0) {
+ if (cmode_0 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = op_bit ? NEONModifiedImmediate_BIC : NEONModifiedImmediate_ORR;
+ }
+ } else { // cmode<2> == '1'
+ if (cmode_1 == 0) {
+ op = op_bit ? NEONModifiedImmediate_MVNI : NEONModifiedImmediate_MOVI;
+ } else { // cmode<1> == '1'
+ if (cmode_0 == 0) {
+ op = NEONModifiedImmediate_MOVI;
+ } else { // cmode<0> == '1'
+ op = NEONModifiedImmediate_MOVI;
+ }
+ }
+ }
+ }
+
+ // Call the logic function
+ if (op == NEONModifiedImmediate_ORR) {
+ orr(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_BIC) {
+ bic(vform, rd, rd, imm);
+ } else if (op == NEONModifiedImmediate_MOVI) {
+ movi(vform, rd, imm);
+ } else if (op == NEONModifiedImmediate_MVNI) {
+ mvni(vform, rd, imm);
+ } else {
+ VisitUnimplemented(instr);
+ }
+}
+
+
+void Simulator::VisitNEONScalar2RegMisc(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEON2RegMiscOpcode) <= NEON_NEG_scalar_opcode) {
+ // These instructions all use a two bit size field, except NOT and RBIT,
+ // which use the field to encode the operation.
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_CMEQ_zero_scalar: cmp(vf, rd, rn, 0, eq); break;
+ case NEON_CMGE_zero_scalar: cmp(vf, rd, rn, 0, ge); break;
+ case NEON_CMGT_zero_scalar: cmp(vf, rd, rn, 0, gt); break;
+ case NEON_CMLT_zero_scalar: cmp(vf, rd, rn, 0, lt); break;
+ case NEON_CMLE_zero_scalar: cmp(vf, rd, rn, 0, le); break;
+ case NEON_ABS_scalar: abs(vf, rd, rn); break;
+ case NEON_SQABS_scalar: abs(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_NEG_scalar: neg(vf, rd, rn); break;
+ case NEON_SQNEG_scalar: neg(vf, rd, rn).SignedSaturate(vf); break;
+ case NEON_SUQADD_scalar: suqadd(vf, rd, rn); break;
+ case NEON_USQADD_scalar: usqadd(vf, rd, rn); break;
+ default: VIXL_UNIMPLEMENTED(); break;
+ }
+ } else {
+ VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // These instructions all use a one bit size field, except SQXTUN, SQXTN
+ // and UQXTN, which use a two bit size field.
+ switch (instr->Mask(NEONScalar2RegMiscFPMask)) {
+ case NEON_FRECPE_scalar: frecpe(fpf, rd, rn, fpcr_rounding); break;
+ case NEON_FRECPX_scalar: frecpx(fpf, rd, rn); break;
+ case NEON_FRSQRTE_scalar: frsqrte(fpf, rd, rn); break;
+ case NEON_FCMGT_zero_scalar: fcmp_zero(fpf, rd, rn, gt); break;
+ case NEON_FCMGE_zero_scalar: fcmp_zero(fpf, rd, rn, ge); break;
+ case NEON_FCMEQ_zero_scalar: fcmp_zero(fpf, rd, rn, eq); break;
+ case NEON_FCMLE_zero_scalar: fcmp_zero(fpf, rd, rn, le); break;
+ case NEON_FCMLT_zero_scalar: fcmp_zero(fpf, rd, rn, lt); break;
+ case NEON_SCVTF_scalar: scvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_UCVTF_scalar: ucvtf(fpf, rd, rn, 0, fpcr_rounding); break;
+ case NEON_FCVTNS_scalar: fcvts(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTNU_scalar: fcvtu(fpf, rd, rn, FPTieEven); break;
+ case NEON_FCVTPS_scalar: fcvts(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTPU_scalar: fcvtu(fpf, rd, rn, FPPositiveInfinity); break;
+ case NEON_FCVTMS_scalar: fcvts(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTMU_scalar: fcvtu(fpf, rd, rn, FPNegativeInfinity); break;
+ case NEON_FCVTZS_scalar: fcvts(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTZU_scalar: fcvtu(fpf, rd, rn, FPZero); break;
+ case NEON_FCVTAS_scalar: fcvts(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTAU_scalar: fcvtu(fpf, rd, rn, FPTieAway); break;
+ case NEON_FCVTXN_scalar:
+ // Unlike all of the other FP instructions above, fcvtxn encodes dest
+ // size S as size<0>=1. There's only one case, so we ignore the form.
+ VIXL_ASSERT(instr->Bit(22) == 1);
+ fcvtxn(kFormatS, rd, rn);
+ break;
+ default:
+ switch (instr->Mask(NEONScalar2RegMiscMask)) {
+ case NEON_SQXTN_scalar: sqxtn(vf, rd, rn); break;
+ case NEON_UQXTN_scalar: uqxtn(vf, rd, rn); break;
+ case NEON_SQXTUN_scalar: sqxtun(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Diff(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+ switch (instr->Mask(NEONScalar3DiffMask)) {
+ case NEON_SQDMLAL_scalar: sqdmlal(vf, rd, rn, rm); break;
+ case NEON_SQDMLSL_scalar: sqdmlsl(vf, rd, rn, rm); break;
+ case NEON_SQDMULL_scalar: sqdmull(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalar3Same(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::ScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ if (instr->Mask(NEONScalar3SameFPFMask) == NEONScalar3SameFPFixed) {
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ switch (instr->Mask(NEONScalar3SameFPMask)) {
+ case NEON_FMULX_scalar: fmulx(vf, rd, rn, rm); break;
+ case NEON_FACGE_scalar: fabscmp(vf, rd, rn, rm, ge); break;
+ case NEON_FACGT_scalar: fabscmp(vf, rd, rn, rm, gt); break;
+ case NEON_FCMEQ_scalar: fcmp(vf, rd, rn, rm, eq); break;
+ case NEON_FCMGE_scalar: fcmp(vf, rd, rn, rm, ge); break;
+ case NEON_FCMGT_scalar: fcmp(vf, rd, rn, rm, gt); break;
+ case NEON_FRECPS_scalar: frecps(vf, rd, rn, rm); break;
+ case NEON_FRSQRTS_scalar: frsqrts(vf, rd, rn, rm); break;
+ case NEON_FABD_scalar: fabd(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ } else {
+ switch (instr->Mask(NEONScalar3SameMask)) {
+ case NEON_ADD_scalar: add(vf, rd, rn, rm); break;
+ case NEON_SUB_scalar: sub(vf, rd, rn, rm); break;
+ case NEON_CMEQ_scalar: cmp(vf, rd, rn, rm, eq); break;
+ case NEON_CMGE_scalar: cmp(vf, rd, rn, rm, ge); break;
+ case NEON_CMGT_scalar: cmp(vf, rd, rn, rm, gt); break;
+ case NEON_CMHI_scalar: cmp(vf, rd, rn, rm, hi); break;
+ case NEON_CMHS_scalar: cmp(vf, rd, rn, rm, hs); break;
+ case NEON_CMTST_scalar: cmptst(vf, rd, rn, rm); break;
+ case NEON_USHL_scalar: ushl(vf, rd, rn, rm); break;
+ case NEON_SSHL_scalar: sshl(vf, rd, rn, rm); break;
+ case NEON_SQDMULH_scalar: sqdmulh(vf, rd, rn, rm); break;
+ case NEON_SQRDMULH_scalar: sqrdmulh(vf, rd, rn, rm); break;
+ case NEON_UQADD_scalar:
+ add(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQADD_scalar:
+ add(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSUB_scalar:
+ sub(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSUB_scalar:
+ sub(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_UQSHL_scalar:
+ ushl(vf, rd, rn, rm).UnsignedSaturate(vf);
+ break;
+ case NEON_SQSHL_scalar:
+ sshl(vf, rd, rn, rm).SignedSaturate(vf);
+ break;
+ case NEON_URSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_SRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf);
+ break;
+ case NEON_UQRSHL_scalar:
+ ushl(vf, rd, rn, rm).Round(vf).UnsignedSaturate(vf);
+ break;
+ case NEON_SQRSHL_scalar:
+ sshl(vf, rd, rn, rm).Round(vf).SignedSaturate(vf);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+ }
+}
+
+
+void Simulator::VisitNEONScalarByIndexedElement(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LongScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+ VectorFormat vf_r = nfd.GetVectorFormat(nfd.ScalarFormatMap());
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ ByElementOp Op = NULL;
+
+ int rm_reg = instr->Rm();
+ int index = (instr->NEONH() << 1) | instr->NEONL();
+ if (instr->NEONSize() == 1) {
+ rm_reg &= 0xf;
+ index = (index << 1) | instr->NEONM();
+ }
+
+ switch (instr->Mask(NEONScalarByIndexedElementMask)) {
+ case NEON_SQDMULL_byelement_scalar: Op = &Simulator::sqdmull; break;
+ case NEON_SQDMLAL_byelement_scalar: Op = &Simulator::sqdmlal; break;
+ case NEON_SQDMLSL_byelement_scalar: Op = &Simulator::sqdmlsl; break;
+ case NEON_SQDMULH_byelement_scalar:
+ Op = &Simulator::sqdmulh;
+ vf = vf_r;
+ break;
+ case NEON_SQRDMULH_byelement_scalar:
+ Op = &Simulator::sqrdmulh;
+ vf = vf_r;
+ break;
+ default:
+ vf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
+ index = instr->NEONH();
+ if ((instr->FPType() & 1) == 0) {
+ index = (index << 1) | instr->NEONL();
+ }
+ switch (instr->Mask(NEONScalarByIndexedElementFPMask)) {
+ case NEON_FMUL_byelement_scalar: Op = &Simulator::fmul; break;
+ case NEON_FMLA_byelement_scalar: Op = &Simulator::fmla; break;
+ case NEON_FMLS_byelement_scalar: Op = &Simulator::fmls; break;
+ case NEON_FMULX_byelement_scalar: Op = &Simulator::fmulx; break;
+ default: VIXL_UNIMPLEMENTED();
+ }
+ }
+
+ (this->*Op)(vf, rd, rn, vreg(rm_reg), index);
+}
+
+
+void Simulator::VisitNEONScalarCopy(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::TriangularScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+
+ if (instr->Mask(NEONScalarCopyMask) == NEON_DUP_ELEMENT_scalar) {
+ int imm5 = instr->ImmNEON5();
+ int tz = CountTrailingZeros(imm5, 32);
+ int rn_index = imm5 >> (tz + 1);
+ dup_element(vf, rd, rn, rn_index);
+ } else {
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarPairwise(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::FPScalarFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ switch (instr->Mask(NEONScalarPairwiseMask)) {
+ case NEON_ADDP_scalar: addp(vf, rd, rn); break;
+ case NEON_FADDP_scalar: faddp(vf, rd, rn); break;
+ case NEON_FMAXP_scalar: fmaxp(vf, rd, rn); break;
+ case NEON_FMAXNMP_scalar: fmaxnmp(vf, rd, rn); break;
+ case NEON_FMINP_scalar: fminp(vf, rd, rn); break;
+ case NEON_FMINNMP_scalar: fminnmp(vf, rd, rn); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONScalarShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_B, NF_H, NF_H, NF_S, NF_S, NF_S, NF_S,
+ NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D, NF_D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
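+  // Illustrative decode: D-sized operands have immh = 0b1xxx, so
+  // highestSetBit = 3. "sshr d0, d1, #3" encodes immh:immb = 125, giving
+  // right_shift = 128 - 125 = 3; "shl d0, d1, #3" encodes immh:immb = 67,
+  // giving left_shift = 67 - 64 = 3. Only one of the two values is
+  // meaningful for any given instruction.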
+ switch (instr->Mask(NEONScalarShiftImmediateMask)) {
+ case NEON_SHL_scalar: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI_scalar: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHL_imm_scalar: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm_scalar: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU_scalar: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI_scalar: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR_scalar: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR_scalar: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR_scalar: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR_scalar: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA_scalar: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA_scalar: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA_scalar: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA_scalar: ursra(vf, rd, rn, right_shift); break;
+ case NEON_UQSHRN_scalar: uqshrn(vf, rd, rn, right_shift); break;
+ case NEON_UQRSHRN_scalar: uqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRN_scalar: sqshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRN_scalar: sqrshrn(vf, rd, rn, right_shift); break;
+ case NEON_SQSHRUN_scalar: sqshrun(vf, rd, rn, right_shift); break;
+ case NEON_SQRSHRUN_scalar: sqrshrun(vf, rd, rn, right_shift); break;
+ case NEON_FCVTZS_imm_scalar: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm_scalar: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SCVTF_imm_scalar:
+ scvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ case NEON_UCVTF_imm_scalar:
+ ucvtf(vf, rd, rn, right_shift, fpcr_rounding);
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONShiftImmediate(const Instruction* instr) {
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ FPRounding fpcr_rounding = static_cast<FPRounding>(fpcr().RMode());
+
+ // 00010->8B, 00011->16B, 001x0->4H, 001x1->8H,
+ // 01xx0->2S, 01xx1->4S, 1xxx1->2D, all others undefined.
+ static const NEONFormatMap map = {
+ {22, 21, 20, 19, 30},
+ {NF_UNDEF, NF_UNDEF, NF_8B, NF_16B, NF_4H, NF_8H, NF_4H, NF_8H,
+ NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S, NF_2S, NF_4S,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D,
+ NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D, NF_UNDEF, NF_2D}
+ };
+ NEONFormatDecoder nfd(instr, &map);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ // 0001->8H, 001x->4S, 01xx->2D, all others undefined.
+ static const NEONFormatMap map_l = {
+ {22, 21, 20, 19},
+ {NF_UNDEF, NF_8H, NF_4S, NF_4S, NF_2D, NF_2D, NF_2D, NF_2D}
+ };
+ VectorFormat vf_l = nfd.GetVectorFormat(&map_l);
+
+ int highestSetBit = HighestSetBitPosition(instr->ImmNEONImmh());
+ int immhimmb = instr->ImmNEONImmhImmb();
+ int right_shift = (16 << highestSetBit) - immhimmb;
+ int left_shift = immhimmb - (8 << highestSetBit);
+
+ switch (instr->Mask(NEONShiftImmediateMask)) {
+ case NEON_SHL: shl(vf, rd, rn, left_shift); break;
+ case NEON_SLI: sli(vf, rd, rn, left_shift); break;
+ case NEON_SQSHLU: sqshlu(vf, rd, rn, left_shift); break;
+ case NEON_SRI: sri(vf, rd, rn, right_shift); break;
+ case NEON_SSHR: sshr(vf, rd, rn, right_shift); break;
+ case NEON_USHR: ushr(vf, rd, rn, right_shift); break;
+ case NEON_SRSHR: sshr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_URSHR: ushr(vf, rd, rn, right_shift).Round(vf); break;
+ case NEON_SSRA: ssra(vf, rd, rn, right_shift); break;
+ case NEON_USRA: usra(vf, rd, rn, right_shift); break;
+ case NEON_SRSRA: srsra(vf, rd, rn, right_shift); break;
+ case NEON_URSRA: ursra(vf, rd, rn, right_shift); break;
+ case NEON_SQSHL_imm: sqshl(vf, rd, rn, left_shift); break;
+ case NEON_UQSHL_imm: uqshl(vf, rd, rn, left_shift); break;
+ case NEON_SCVTF_imm: scvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_UCVTF_imm: ucvtf(vf, rd, rn, right_shift, fpcr_rounding); break;
+ case NEON_FCVTZS_imm: fcvts(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_FCVTZU_imm: fcvtu(vf, rd, rn, FPZero, right_shift); break;
+ case NEON_SSHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ sshll2(vf, rd, rn, left_shift);
+ } else {
+ sshll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_USHLL:
+ vf = vf_l;
+ if (instr->Mask(NEON_Q)) {
+ ushll2(vf, rd, rn, left_shift);
+ } else {
+ ushll(vf, rd, rn, left_shift);
+ }
+ break;
+ case NEON_SHRN:
+ if (instr->Mask(NEON_Q)) {
+ shrn2(vf, rd, rn, right_shift);
+ } else {
+ shrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_RSHRN:
+ if (instr->Mask(NEON_Q)) {
+ rshrn2(vf, rd, rn, right_shift);
+ } else {
+ rshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_UQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ uqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ uqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrn2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrn(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ case NEON_SQRSHRUN:
+ if (instr->Mask(NEON_Q)) {
+ sqrshrun2(vf, rd, rn, right_shift);
+ } else {
+ sqrshrun(vf, rd, rn, right_shift);
+ }
+ break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONTable(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr, NEONFormatDecoder::LogicalFormatMap());
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rn2 = vreg((instr->Rn() + 1) % kNumberOfVRegisters);
+ SimVRegister& rn3 = vreg((instr->Rn() + 2) % kNumberOfVRegisters);
+ SimVRegister& rn4 = vreg((instr->Rn() + 3) % kNumberOfVRegisters);
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONTableMask)) {
+ case NEON_TBL_1v: tbl(vf, rd, rn, rm); break;
+ case NEON_TBL_2v: tbl(vf, rd, rn, rn2, rm); break;
+ case NEON_TBL_3v: tbl(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBL_4v: tbl(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ case NEON_TBX_1v: tbx(vf, rd, rn, rm); break;
+ case NEON_TBX_2v: tbx(vf, rd, rn, rn2, rm); break;
+ case NEON_TBX_3v: tbx(vf, rd, rn, rn2, rn3, rm); break;
+ case NEON_TBX_4v: tbx(vf, rd, rn, rn2, rn3, rn4, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::VisitNEONPerm(const Instruction* instr) {
+ NEONFormatDecoder nfd(instr);
+ VectorFormat vf = nfd.GetVectorFormat();
+
+ SimVRegister& rd = vreg(instr->Rd());
+ SimVRegister& rn = vreg(instr->Rn());
+ SimVRegister& rm = vreg(instr->Rm());
+
+ switch (instr->Mask(NEONPermMask)) {
+ case NEON_TRN1: trn1(vf, rd, rn, rm); break;
+ case NEON_TRN2: trn2(vf, rd, rn, rm); break;
+ case NEON_UZP1: uzp1(vf, rd, rn, rm); break;
+ case NEON_UZP2: uzp2(vf, rd, rn, rm); break;
+ case NEON_ZIP1: zip1(vf, rd, rn, rm); break;
+ case NEON_ZIP2: zip2(vf, rd, rn, rm); break;
+ default:
+ VIXL_UNIMPLEMENTED();
+ }
+}
+
+
+void Simulator::DoUnreachable(const Instruction* instr) {
+ VIXL_ASSERT(instr->InstructionBits() == UNDEFINED_INST_PATTERN);
+
+ fprintf(stream_, "Hit UNREACHABLE marker at pc=%p.\n",
+ reinterpret_cast<const void*>(instr));
+ abort();
+}
+
+
+void Simulator::DoTrace(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kTraceOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+ uint32_t command;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+ memcpy(&command, instr + kTraceCommandOffset, sizeof(command));
+
+ switch (command) {
+ case TRACE_ENABLE:
+ set_trace_parameters(trace_parameters() | parameters);
+ break;
+ case TRACE_DISABLE:
+ set_trace_parameters(trace_parameters() & ~parameters);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ }
+
+ set_pc(instr->InstructionAtOffset(kTraceLength));
+}
+
+
+void Simulator::DoLog(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kLogOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t parameters;
+
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&parameters, instr + kTraceParamsOffset, sizeof(parameters));
+
+ // We don't support a one-shot LOG_DISASM.
+ VIXL_ASSERT((parameters & LOG_DISASM) == 0);
+ // Print the requested information.
+ if (parameters & LOG_SYSREGS) PrintSystemRegisters();
+ if (parameters & LOG_REGS) PrintRegisters();
+ if (parameters & LOG_VREGS) PrintVRegisters();
+
+ set_pc(instr->InstructionAtOffset(kLogLength));
+}
+
+
+void Simulator::DoPrintf(const Instruction* instr) {
+ VIXL_ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+ (instr->ImmException() == kPrintfOpcode));
+
+ // Read the arguments encoded inline in the instruction stream.
+ uint32_t arg_count;
+ uint32_t arg_pattern_list;
+ VIXL_STATIC_ASSERT(sizeof(*instr) == 1);
+ memcpy(&arg_count,
+ instr + kPrintfArgCountOffset,
+ sizeof(arg_count));
+ memcpy(&arg_pattern_list,
+ instr + kPrintfArgPatternListOffset,
+ sizeof(arg_pattern_list));
+
+ VIXL_ASSERT(arg_count <= kPrintfMaxArgCount);
+ VIXL_ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+
+ // We need to call the host printf function with a set of arguments defined by
+ // arg_pattern_list. Because we don't know the types and sizes of the
+ // arguments, this is very difficult to do in a robust and portable way. To
+ // work around the problem, we pick apart the format string, and print one
+ // format placeholder at a time.
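+  // For example (illustrative), "x=%d, y=%f" is copied into the scratch
+  // buffer as "x=\0%d, y=\0%f": the literal prefix "x=" is printed first,
+  // then the chunk "%d, y=" is printed with w1 and the chunk "%f" with d0,
+  // as directed by arg_pattern_list.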
+
+ // Allocate space for the format string. We take a copy, so we can modify it.
+ // Leave enough space for one extra character per expected argument (plus the
+ // '\0' termination).
+ const char * format_base = reg<const char *>(0);
+ VIXL_ASSERT(format_base != NULL);
+ size_t length = strlen(format_base) + 1;
+ char * const format = (char *)js_calloc(length + arg_count);
+
+ // A list of chunks, each with exactly one format placeholder.
+ const char * chunks[kPrintfMaxArgCount];
+
+ // Copy the format string and search for format placeholders.
+ uint32_t placeholder_count = 0;
+ char * format_scratch = format;
+ for (size_t i = 0; i < length; i++) {
+ if (format_base[i] != '%') {
+ *format_scratch++ = format_base[i];
+ } else {
+ if (format_base[i + 1] == '%') {
+ // Ignore explicit "%%" sequences.
+ *format_scratch++ = format_base[i];
+ i++;
+ // Chunks after the first are passed as format strings to printf, so we
+ // need to escape '%' characters in those chunks.
+ if (placeholder_count > 0) *format_scratch++ = format_base[i];
+ } else {
+ VIXL_CHECK(placeholder_count < arg_count);
+ // Insert '\0' before placeholders, and store their locations.
+ *format_scratch++ = '\0';
+ chunks[placeholder_count++] = format_scratch;
+ *format_scratch++ = format_base[i];
+ }
+ }
+ }
+ VIXL_CHECK(placeholder_count == arg_count);
+
+ // Finally, call printf with each chunk, passing the appropriate register
+ // argument. Normally, printf returns the number of bytes transmitted, so we
+ // can emulate a single printf call by adding the result from each chunk. If
+ // any call returns a negative (error) value, though, just return that value.
+
+ printf("%s", clr_printf);
+
+ // Because '\0' is inserted before each placeholder, the first string in
+ // 'format' contains no format placeholders and should be printed literally.
+ int result = printf("%s", format);
+ int pcs_r = 1; // Start at x1. x0 holds the format string.
+ int pcs_f = 0; // Start at d0.
+ if (result >= 0) {
+ for (uint32_t i = 0; i < placeholder_count; i++) {
+ int part_result = -1;
+
+ uint32_t arg_pattern = arg_pattern_list >> (i * kPrintfArgPatternBits);
+ arg_pattern &= (1 << kPrintfArgPatternBits) - 1;
+ switch (arg_pattern) {
+ case kPrintfArgW: part_result = printf(chunks[i], wreg(pcs_r++)); break;
+ case kPrintfArgX: part_result = printf(chunks[i], xreg(pcs_r++)); break;
+ case kPrintfArgD: part_result = printf(chunks[i], dreg(pcs_f++)); break;
+ default: VIXL_UNREACHABLE();
+ }
+
+ if (part_result < 0) {
+ // Handle error values.
+ result = part_result;
+ break;
+ }
+
+ result += part_result;
+ }
+ }
+
+ printf("%s", clr_normal);
+
+ // Printf returns its result in x0 (just like the C library's printf).
+ set_xreg(0, result);
+
+ // The printf parameters are inlined in the code, so skip them.
+ set_pc(instr->InstructionAtOffset(kPrintfLength));
+
+ // Set LR as if we'd just called a native printf function.
+ set_lr(pc());
+
+ js_free(format);
+}
+
+} // namespace vixl
+
+#endif // JS_SIMULATOR_ARM64
diff --git a/js/src/jit/arm64/vixl/Simulator-vixl.h b/js/src/jit/arm64/vixl/Simulator-vixl.h
new file mode 100644
index 0000000000..a0515bee06
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Simulator-vixl.h
@@ -0,0 +1,2571 @@
+// Copyright 2015, ARM Limited
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_A64_SIMULATOR_A64_H_
+#define VIXL_A64_SIMULATOR_A64_H_
+
+#include "jstypes.h"
+
+#ifdef JS_SIMULATOR_ARM64
+
+#include "mozilla/Vector.h"
+
+#include "jit/arm64/vixl/Assembler-vixl.h"
+#include "jit/arm64/vixl/Disasm-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+#include "jit/arm64/vixl/Instructions-vixl.h"
+#include "jit/arm64/vixl/Instrument-vixl.h"
+#include "jit/arm64/vixl/MozCachingDecoder.h"
+#include "jit/arm64/vixl/Simulator-Constants-vixl.h"
+#include "jit/arm64/vixl/Utils-vixl.h"
+#include "jit/IonTypes.h"
+#include "js/AllocPolicy.h"
+#include "vm/MutexIDs.h"
+#include "wasm/WasmSignalHandlers.h"
+
+namespace vixl {
+
+// Representation of memory, with typed getters and setters for access.
+class Memory {
+ public:
+ template <typename T>
+ static T AddressUntag(T address) {
+ // Cast the address using a C-style cast. A reinterpret_cast would be
+ // appropriate, but it can't cast one integral type to another.
+ uint64_t bits = (uint64_t)address;
+ return (T)(bits & ~kAddressTagMask);
+ }
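+  // Illustrative: assuming kAddressTagMask covers the top (tag) byte of the
+  // address, AddressUntag(0xab00007fdeadbeef) yields 0x0000007fdeadbeef.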
+
+ template <typename T, typename A>
+ static T Read(A address) {
+ T value;
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(&value, reinterpret_cast<const char *>(address), sizeof(value));
+ return value;
+ }
+
+ template <typename T, typename A>
+ static void Write(A address, T value) {
+ address = AddressUntag(address);
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8) ||
+ (sizeof(value) == 16));
+ memcpy(reinterpret_cast<char *>(address), &value, sizeof(value));
+ }
+};
+
+// Represent a register (r0-r31, v0-v31).
+template<int kSizeInBytes>
+class SimRegisterBase {
+ public:
+ SimRegisterBase() : written_since_last_log_(false) {}
+
+ // Write the specified value. The value is zero-extended if necessary.
+ template<typename T>
+ void Set(T new_value) {
+ VIXL_STATIC_ASSERT(sizeof(new_value) <= kSizeInBytes);
+ if (sizeof(new_value) < kSizeInBytes) {
+ // All AArch64 registers are zero-extending.
+ memset(value_ + sizeof(new_value), 0, kSizeInBytes - sizeof(new_value));
+ }
+ memcpy(value_, &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Insert a typed value into a register, leaving the rest of the register
+ // unchanged. The lane parameter indicates where in the register the value
+ // should be inserted, in the range [ 0, sizeof(value_) / sizeof(T) ), where
+ // 0 represents the least significant bits.
+ template<typename T>
+ void Insert(int lane, T new_value) {
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(new_value) +
+ (lane * sizeof(new_value))) <= kSizeInBytes);
+ memcpy(&value_[lane * sizeof(new_value)], &new_value, sizeof(new_value));
+ NotifyRegisterWrite();
+ }
+
+ // Read the value as the specified type. The value is truncated if necessary.
+ template<typename T>
+ T Get(int lane = 0) const {
+ T result;
+ VIXL_ASSERT(lane >= 0);
+ VIXL_ASSERT((sizeof(result) + (lane * sizeof(result))) <= kSizeInBytes);
+ memcpy(&result, &value_[lane * sizeof(result)], sizeof(result));
+ return result;
+ }
+
+ // TODO: Make this return a map of updated bytes, so that we can highlight
+ // updated lanes for load-and-insert. (That never happens for scalar code, but
+ // NEON has some instructions that can update individual lanes.)
+ bool WrittenSinceLastLog() const {
+ return written_since_last_log_;
+ }
+
+ void NotifyRegisterLogged() {
+ written_since_last_log_ = false;
+ }
+
+ protected:
+ uint8_t value_[kSizeInBytes];
+
+ // Helpers to aid with register tracing.
+ bool written_since_last_log_;
+
+ void NotifyRegisterWrite() {
+ written_since_last_log_ = true;
+ }
+};
+typedef SimRegisterBase<kXRegSizeInBytes> SimRegister; // r0-r31
+typedef SimRegisterBase<kQRegSizeInBytes> SimVRegister; // v0-v31
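+// Illustrative behaviour of the accessors above: after Set<uint32_t>(0xffffffff)
+// a SimRegister reads back 0x00000000ffffffff via Get<uint64_t>() (writes are
+// zero-extending), while Insert<uint16_t>(3, v) rewrites only lane 3 of a
+// SimVRegister and leaves the other lanes untouched.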
+
+// Representation of a vector register, with typed getters and setters for lanes
+// and additional information to represent lane state.
+class LogicVRegister {
+ public:
+ inline LogicVRegister(SimVRegister& other) // NOLINT
+ : register_(other) {
+ for (unsigned i = 0; i < sizeof(saturated_) / sizeof(saturated_[0]); i++) {
+ saturated_[i] = kNotSaturated;
+ }
+ for (unsigned i = 0; i < sizeof(round_) / sizeof(round_[0]); i++) {
+ round_[i] = 0;
+ }
+ }
+
+ int64_t Int(VectorFormat vform, int index) const {
+ int64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<int8_t>(index); break;
+ case 16: element = register_.Get<int16_t>(index); break;
+ case 32: element = register_.Get<int32_t>(index); break;
+ case 64: element = register_.Get<int64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ uint64_t Uint(VectorFormat vform, int index) const {
+ uint64_t element;
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: element = register_.Get<uint8_t>(index); break;
+ case 16: element = register_.Get<uint16_t>(index); break;
+ case 32: element = register_.Get<uint32_t>(index); break;
+ case 64: element = register_.Get<uint64_t>(index); break;
+ default: VIXL_UNREACHABLE(); return 0;
+ }
+ return element;
+ }
+
+ int64_t IntLeftJustified(VectorFormat vform, int index) const {
+ return Int(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ uint64_t UintLeftJustified(VectorFormat vform, int index) const {
+ return Uint(vform, index) << (64 - LaneSizeInBitsFromFormat(vform));
+ }
+
+ void SetInt(VectorFormat vform, int index, int64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<int8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<int16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<int32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<int64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void SetUint(VectorFormat vform, int index, uint64_t value) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, static_cast<uint8_t>(value)); break;
+ case 16: register_.Insert(index, static_cast<uint16_t>(value)); break;
+ case 32: register_.Insert(index, static_cast<uint32_t>(value)); break;
+ case 64: register_.Insert(index, static_cast<uint64_t>(value)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void ReadUintFromMem(VectorFormat vform, int index, uint64_t addr) const {
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: register_.Insert(index, Memory::Read<uint8_t>(addr)); break;
+ case 16: register_.Insert(index, Memory::Read<uint16_t>(addr)); break;
+ case 32: register_.Insert(index, Memory::Read<uint32_t>(addr)); break;
+ case 64: register_.Insert(index, Memory::Read<uint64_t>(addr)); break;
+ default: VIXL_UNREACHABLE(); return;
+ }
+ }
+
+ void WriteUintToMem(VectorFormat vform, int index, uint64_t addr) const {
+ uint64_t value = Uint(vform, index);
+ switch (LaneSizeInBitsFromFormat(vform)) {
+ case 8: Memory::Write(addr, static_cast<uint8_t>(value)); break;
+ case 16: Memory::Write(addr, static_cast<uint16_t>(value)); break;
+ case 32: Memory::Write(addr, static_cast<uint32_t>(value)); break;
+ case 64: Memory::Write(addr, value); break;
+ }
+ }
+
+ template <typename T>
+ T Float(int index) const {
+ return register_.Get<T>(index);
+ }
+
+ template <typename T>
+ void SetFloat(int index, T value) const {
+ register_.Insert(index, value);
+ }
+
+ // When setting a result in a register of size less than Q, the top bits of
+ // the Q register must be cleared.
+ void ClearForWrite(VectorFormat vform) const {
+ unsigned size = RegisterSizeInBytesFromFormat(vform);
+ for (unsigned i = size; i < kQRegSizeInBytes; i++) {
+ SetUint(kFormat16B, i, 0);
+ }
+ }
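+  // For example, a 2S result occupies bytes 0-7, so ClearForWrite(kFormat2S)
+  // zeroes bytes 8-15 of the backing Q register.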
+
+ // Saturation state for each lane of a vector.
+ enum Saturation {
+ kNotSaturated = 0,
+ kSignedSatPositive = 1 << 0,
+ kSignedSatNegative = 1 << 1,
+ kSignedSatMask = kSignedSatPositive | kSignedSatNegative,
+ kSignedSatUndefined = kSignedSatMask,
+ kUnsignedSatPositive = 1 << 2,
+ kUnsignedSatNegative = 1 << 3,
+ kUnsignedSatMask = kUnsignedSatPositive | kUnsignedSatNegative,
+ kUnsignedSatUndefined = kUnsignedSatMask
+ };
+
+ // Getters for saturation state.
+ Saturation GetSignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kSignedSatMask);
+ }
+
+ Saturation GetUnsignedSaturation(int index) {
+ return static_cast<Saturation>(saturated_[index] & kUnsignedSatMask);
+ }
+
+ // Setters for saturation state.
+ void ClearSat(int index) {
+ saturated_[index] = kNotSaturated;
+ }
+
+ void SetSignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kSignedSatPositive : kSignedSatNegative);
+ }
+
+ void SetUnsignedSat(int index, bool positive) {
+ SetSatFlag(index, positive ? kUnsignedSatPositive : kUnsignedSatNegative);
+ }
+
+ void SetSatFlag(int index, Saturation sat) {
+ saturated_[index] = static_cast<Saturation>(saturated_[index] | sat);
+ VIXL_ASSERT((sat & kUnsignedSatMask) != kUnsignedSatUndefined);
+ VIXL_ASSERT((sat & kSignedSatMask) != kSignedSatUndefined);
+ }
+
+ // Saturate lanes of a vector based on saturation state.
+ LogicVRegister& SignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetSignedSaturation(i);
+ if (sat == kSignedSatPositive) {
+ SetInt(vform, i, MaxIntFromFormat(vform));
+ } else if (sat == kSignedSatNegative) {
+ SetInt(vform, i, MinIntFromFormat(vform));
+ }
+ }
+ return *this;
+ }
+
+ LogicVRegister& UnsignedSaturate(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ Saturation sat = GetUnsignedSaturation(i);
+ if (sat == kUnsignedSatPositive) {
+ SetUint(vform, i, MaxUintFromFormat(vform));
+ } else if (sat == kUnsignedSatNegative) {
+ SetUint(vform, i, 0);
+ }
+ }
+ return *this;
+ }
+
+ // Getter for rounding state.
+ bool GetRounding(int index) {
+ return round_[index];
+ }
+
+ // Setter for rounding state.
+ void SetRounding(int index, bool round) {
+ round_[index] = round;
+ }
+
+ // Round lanes of a vector based on rounding state.
+ LogicVRegister& Round(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ SetInt(vform, i, Int(vform, i) + (GetRounding(i) ? 1 : 0));
+ }
+ return *this;
+ }
+
+ // Unsigned halve lanes of a vector, and use the saturation state to set the
+ // top bit.
+ LogicVRegister& Uhalve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ uint64_t val = Uint(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetUnsignedSaturation(i) != kNotSaturated) {
+ // If the operation causes unsigned saturation, the bit shifted into the
+ // most significant bit must be set.
+ val |= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+  // Signed halve lanes of a vector, and use the saturation state to set the top bit.
+ LogicVRegister& Halve(VectorFormat vform) {
+ for (int i = 0; i < LaneCountFromFormat(vform); i++) {
+ int64_t val = Int(vform, i);
+ SetRounding(i, (val & 1) == 1);
+ val >>= 1;
+ if (GetSignedSaturation(i) != kNotSaturated) {
+ // If the operation causes signed saturation, the sign bit must be
+ // inverted.
+ val ^= (MaxUintFromFormat(vform) >> 1) + 1;
+ }
+ SetInt(vform, i, val);
+ }
+ return *this;
+ }
+
+ private:
+ SimVRegister& register_;
+
+ // Allocate one saturation state entry per lane; largest register is type Q,
+ // and lanes can be a minimum of one byte wide.
+ Saturation saturated_[kQRegSizeInBytes];
+
+ // Allocate one rounding state entry per lane.
+ bool round_[kQRegSizeInBytes];
+};
+
+// The proper way to initialize a simulated system register (such as NZCV) is as
+// follows:
+// SimSystemRegister nzcv = SimSystemRegister::DefaultValueFor(NZCV);
+class SimSystemRegister {
+ public:
+ // The default constructor represents a register which has no writable bits.
+ // It is not possible to set its value to anything other than 0.
+ SimSystemRegister() : value_(0), write_ignore_mask_(0xffffffff) { }
+
+ uint32_t RawValue() const {
+ return value_;
+ }
+
+ void SetRawValue(uint32_t new_value) {
+ value_ = (value_ & write_ignore_mask_) | (new_value & ~write_ignore_mask_);
+ }
+
+ uint32_t Bits(int msb, int lsb) const {
+ return ExtractUnsignedBitfield32(msb, lsb, value_);
+ }
+
+ int32_t SignedBits(int msb, int lsb) const {
+ return ExtractSignedBitfield32(msb, lsb, value_);
+ }
+
+ void SetBits(int msb, int lsb, uint32_t bits);
+
+ // Default system register values.
+ static SimSystemRegister DefaultValueFor(SystemRegister id);
+
+#define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
+ uint32_t Name() const { return Func(HighBit, LowBit); } \
+ void Set##Name(uint32_t bits) { SetBits(HighBit, LowBit, bits); }
+#define DEFINE_WRITE_IGNORE_MASK(Name, Mask) \
+ static const uint32_t Name##WriteIgnoreMask = ~static_cast<uint32_t>(Mask);
+
+ SYSTEM_REGISTER_FIELDS_LIST(DEFINE_GETTER, DEFINE_WRITE_IGNORE_MASK)
+
+#undef DEFINE_WRITE_IGNORE_MASK
+#undef DEFINE_GETTER
+
+ protected:
+ // Most system registers only implement a few of the bits in the word. Other
+ // bits are "read-as-zero, write-ignored". The write_ignore_mask argument
+ // describes the bits which are not modifiable.
+ SimSystemRegister(uint32_t value, uint32_t write_ignore_mask)
+ : value_(value), write_ignore_mask_(write_ignore_mask) { }
+
+ uint32_t value_;
+ uint32_t write_ignore_mask_;
+};
+
+
+class SimExclusiveLocalMonitor {
+ public:
+ SimExclusiveLocalMonitor() : kSkipClearProbability(8), seed_(0x87654321) {
+ Clear();
+ }
+
+ // Clear the exclusive monitor (like clrex).
+ void Clear() {
+ address_ = 0;
+ size_ = 0;
+ }
+
+ // Clear the exclusive monitor most of the time.
+ void MaybeClear() {
+ if ((seed_ % kSkipClearProbability) != 0) {
+ Clear();
+ }
+
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ }
+
+ // Mark the address range for exclusive access (like load-exclusive).
+ void MarkExclusive(uint64_t address, size_t size) {
+ address_ = address;
+ size_ = size;
+ }
+
+ // Return true if the address range is marked (like store-exclusive).
+ // This helper doesn't implicitly clear the monitor.
+ bool IsExclusive(uint64_t address, size_t size) {
+ VIXL_ASSERT(size > 0);
+ // Be pedantic: Require both the address and the size to match.
+ return (size == size_) && (address == address_);
+ }
+
+ private:
+ uint64_t address_;
+ size_t size_;
+
+ const int kSkipClearProbability;
+ uint32_t seed_;
+};
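+// A minimal usage sketch for the monitor above (assuming, purely for
+// illustration, a local_monitor_ member in the simulator):
+//   local_monitor_.MarkExclusive(addr, size);          // on a load-exclusive
+//   bool ok = local_monitor_.IsExclusive(addr, size);  // on a store-exclusive
+//   local_monitor_.Clear();                            // on clrex or a completed store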
+
+
+// We can't accurately simulate the global monitor, since it depends on external
+// influences. Instead, this implementation occasionally causes accesses to
+// fail, according to kPassProbability.
+class SimExclusiveGlobalMonitor {
+ public:
+ SimExclusiveGlobalMonitor() : kPassProbability(8), seed_(0x87654321) {}
+
+ bool IsExclusive(uint64_t address, size_t size) {
+ USE(address, size);
+
+ bool pass = (seed_ % kPassProbability) != 0;
+ // Advance seed_ using a simple linear congruential generator.
+ seed_ = (seed_ * 48271) % 2147483647;
+ return pass;
+ }
+
+ private:
+ const int kPassProbability;
+ uint32_t seed_;
+};
+
+class Redirection;
+
+class Simulator : public DecoderVisitor {
+ public:
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ using Decoder = CachingDecoder;
+ mozilla::Atomic<bool> pendingCacheRequests = mozilla::Atomic<bool>{ false };
+#endif
+ explicit Simulator(Decoder* decoder, FILE* stream = stdout);
+ ~Simulator();
+
+ // Moz changes.
+ void init(Decoder* decoder, FILE* stream);
+ static Simulator* Current();
+ static Simulator* Create();
+ static void Destroy(Simulator* sim);
+ uintptr_t stackLimit() const;
+ uintptr_t* addressOfStackLimit();
+ bool overRecursed(uintptr_t newsp = 0) const;
+ bool overRecursedWithExtra(uint32_t extra) const;
+ int64_t call(uint8_t* entry, int argument_count, ...);
+ static void* RedirectNativeFunction(void* nativeFunction, js::jit::ABIFunctionType type);
+ void setGPR32Result(int32_t result);
+ void setGPR64Result(int64_t result);
+ void setFP32Result(float result);
+ void setFP64Result(double result);
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ void FlushICache();
+#endif
+ void VisitCallRedirection(const Instruction* instr);
+ static uintptr_t StackLimit() {
+ return Simulator::Current()->stackLimit();
+ }
+ template<typename T> T Read(uintptr_t address);
+ template <typename T> void Write(uintptr_t address_, T value);
+ JS::ProfilingFrameIterator::RegisterState registerState();
+
+ void ResetState();
+
+ // Run the simulator.
+ virtual void Run();
+ void RunFrom(const Instruction* first);
+
+ // Simulation helpers.
+ const Instruction* pc() const { return pc_; }
+ const Instruction* get_pc() const { return pc_; }
+ int64_t get_sp() const { return xreg(31, Reg31IsStackPointer); }
+ int64_t get_lr() const { return xreg(30); }
+ int64_t get_fp() const { return xreg(29); }
+
+ template <typename T>
+ T get_pc_as() const { return reinterpret_cast<T>(const_cast<Instruction*>(pc())); }
+
+ void set_pc(const Instruction* new_pc) {
+ pc_ = Memory::AddressUntag(new_pc);
+ pc_modified_ = true;
+ }
+
+ // Handle any wasm faults, returning true if the fault was handled.
+ // This method is rather hot so inline the normal (no-wasm) case.
+ bool MOZ_ALWAYS_INLINE handle_wasm_seg_fault(uintptr_t addr, unsigned numBytes) {
+ if (MOZ_LIKELY(!js::wasm::CodeExists)) {
+ return false;
+ }
+
+ uint8_t* newPC;
+ if (!js::wasm::MemoryAccessTraps(registerState(), (uint8_t*)addr, numBytes, &newPC)) {
+ return false;
+ }
+
+ set_pc((Instruction*)newPC);
+ return true;
+ }
+
+ void increment_pc() {
+ if (!pc_modified_) {
+ pc_ = pc_->NextInstruction();
+ }
+
+ pc_modified_ = false;
+ }
+
+ void ExecuteInstruction();
+
+ // Declare all Visitor functions.
+ #define DECLARE(A) virtual void Visit##A(const Instruction* instr) override;
+ VISITOR_LIST_THAT_RETURN(DECLARE)
+ VISITOR_LIST_THAT_DONT_RETURN(DECLARE)
+ #undef DECLARE
+
+
+ // Integer register accessors.
+
+ // Basic accessor: Read the register as the specified type.
+ template<typename T>
+ T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ VIXL_ASSERT(code < kNumberOfRegisters);
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ T result;
+ memset(&result, 0, sizeof(result));
+ return result;
+ }
+ return registers_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the reg() template.
+ int32_t wreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int32_t>(code, r31mode);
+ }
+
+ int64_t xreg(unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(code, r31mode);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ uint64_t raw;
+ switch (size) {
+ case kWRegSize: raw = reg<uint32_t>(code, r31mode); break;
+ case kXRegSize: raw = reg<uint64_t>(code, r31mode); break;
+ default:
+ VIXL_UNREACHABLE();
+ return 0;
+ }
+
+ T result;
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ // Use int64_t by default if T is not specified.
+ int64_t reg(unsigned size, unsigned code,
+ Reg31Mode r31mode = Reg31IsZeroRegister) const {
+ return reg<int64_t>(size, code, r31mode);
+ }
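+
+ // Example (illustrative): if x1 holds 0x1122334455667788, then
+ // reg<uint32_t>(kWRegSize, 1) takes the W-sized path and yields 0x55667788,
+ // whereas reg<uint64_t>(kXRegSize, 1) yields the full 64-bit value.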
+
+ enum RegLogMode {
+ LogRegWrites,
+ NoRegLog
+ };
+
+ // Write 'value' into an integer register. The value is zero-extended. This
+ // behaviour matches AArch64 register writes.
+ template<typename T>
+ void set_reg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ VIXL_STATIC_ASSERT((sizeof(T) == kWRegSizeInBytes) ||
+ (sizeof(T) == kXRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfRegisters);
+
+ if ((code == 31) && (r31mode == Reg31IsZeroRegister)) {
+ return;
+ }
+
+ registers_[code].Set(value);
+
+ if (log_mode == LogRegWrites) LogRegister(code, r31mode);
+ }
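+
+ // Example (illustrative): set_wreg(0, -1) writes 0xffffffff and clears the
+ // upper half of the register, so a subsequent xreg(0) reads
+ // 0x00000000ffffffff, matching the AArch64 rule that W-register writes zero
+ // the top 32 bits.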
+
+ // Common specialized accessors for the set_reg() template.
+ void set_wreg(unsigned code, int32_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ void set_xreg(unsigned code, int64_t value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ set_reg(code, value, log_mode, r31mode);
+ }
+
+ // As above, with parameterized size and type. The value is either
+ // zero-extended or truncated to fit, as required.
+ template<typename T>
+ void set_reg(unsigned size, unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites,
+ Reg31Mode r31mode = Reg31IsZeroRegister) {
+ // Zero-extend the input.
+ uint64_t raw = 0;
+ VIXL_STATIC_ASSERT(sizeof(value) <= sizeof(raw));
+ memcpy(&raw, &value, sizeof(value));
+
+ // Write (and possibly truncate) the value.
+ switch (size) {
+ case kWRegSize:
+ set_reg(code, static_cast<uint32_t>(raw), log_mode, r31mode);
+ break;
+ case kXRegSize:
+ set_reg(code, raw, log_mode, r31mode);
+ break;
+ default:
+ VIXL_UNREACHABLE();
+ return;
+ }
+ }
+
+ // Common specialized accessors for the set_reg() template.
+
+ // Commonly-used special cases.
+ template<typename T>
+ void set_lr(T value) {
+ set_reg(kLinkRegCode, value);
+ }
+
+ template<typename T>
+ void set_sp(T value) {
+ set_reg(31, value, LogRegWrites, Reg31IsStackPointer);
+ }
+
+ // Vector register accessors.
+ // These are equivalent to the integer register accessors, but for vector
+ // registers.
+
+ // A structure for representing a 128-bit Q register.
+ struct qreg_t { uint8_t val[kQRegSizeInBytes]; };
+
+ // Basic accessor: read the register as the specified type.
+ template<typename T>
+ T vreg(unsigned code) const {
+ VIXL_STATIC_ASSERT((sizeof(T) == kBRegSizeInBytes) ||
+ (sizeof(T) == kHRegSizeInBytes) ||
+ (sizeof(T) == kSRegSizeInBytes) ||
+ (sizeof(T) == kDRegSizeInBytes) ||
+ (sizeof(T) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+
+ return vregisters_[code].Get<T>();
+ }
+
+ // Common specialized accessors for the vreg() template.
+ int8_t breg(unsigned code) const {
+ return vreg<int8_t>(code);
+ }
+
+ int16_t hreg(unsigned code) const {
+ return vreg<int16_t>(code);
+ }
+
+ float sreg(unsigned code) const {
+ return vreg<float>(code);
+ }
+
+ uint32_t sreg_bits(unsigned code) const {
+ return vreg<uint32_t>(code);
+ }
+
+ double dreg(unsigned code) const {
+ return vreg<double>(code);
+ }
+
+ uint64_t dreg_bits(unsigned code) const {
+ return vreg<uint64_t>(code);
+ }
+
+ qreg_t qreg(unsigned code) const {
+ return vreg<qreg_t>(code);
+ }
+
+ // As above, with parameterized size and return type. The value is
+ // either zero-extended or truncated to fit, as required.
+ template<typename T>
+ T vreg(unsigned size, unsigned code) const {
+ uint64_t raw = 0;
+ T result;
+
+ switch (size) {
+ case kSRegSize: raw = vreg<uint32_t>(code); break;
+ case kDRegSize: raw = vreg<uint64_t>(code); break;
+ default:
+ VIXL_UNREACHABLE();
+ break;
+ }
+
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(raw));
+ // Copy the result and truncate to fit. This assumes a little-endian host.
+ memcpy(&result, &raw, sizeof(result));
+ return result;
+ }
+
+ inline SimVRegister& vreg(unsigned code) {
+ return vregisters_[code];
+ }
+
+ // Basic accessor: Write the specified value.
+ template<typename T>
+ void set_vreg(unsigned code, T value,
+ RegLogMode log_mode = LogRegWrites) {
+ VIXL_STATIC_ASSERT((sizeof(value) == kBRegSizeInBytes) ||
+ (sizeof(value) == kHRegSizeInBytes) ||
+ (sizeof(value) == kSRegSizeInBytes) ||
+ (sizeof(value) == kDRegSizeInBytes) ||
+ (sizeof(value) == kQRegSizeInBytes));
+ VIXL_ASSERT(code < kNumberOfVRegisters);
+ vregisters_[code].Set(value);
+
+ if (log_mode == LogRegWrites) {
+ LogVRegister(code, GetPrintRegisterFormat(value));
+ }
+ }
+
+ // Common specialized accessors for the set_vreg() template.
+ void set_breg(unsigned code, int8_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_hreg(unsigned code, int16_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg(unsigned code, float value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_sreg_bits(unsigned code, uint32_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg(unsigned code, double value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_dreg_bits(unsigned code, uint64_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ void set_qreg(unsigned code, qreg_t value,
+ RegLogMode log_mode = LogRegWrites) {
+ set_vreg(code, value, log_mode);
+ }
+
+ bool N() const { return nzcv_.N() != 0; }
+ bool Z() const { return nzcv_.Z() != 0; }
+ bool C() const { return nzcv_.C() != 0; }
+ bool V() const { return nzcv_.V() != 0; }
+
+ SimSystemRegister& ReadNzcv() { return nzcv_; }
+ SimSystemRegister& nzcv() { return nzcv_; }
+
+ // TODO: Find a way to make the fpcr_ members return the proper types, so
+ // these accessors are not necessary.
+ FPRounding RMode() { return static_cast<FPRounding>(fpcr_.RMode()); }
+ bool DN() { return fpcr_.DN() != 0; }
+ SimSystemRegister& fpcr() { return fpcr_; }
+
+ UseDefaultNaN ReadDN() const {
+ return fpcr_.DN() != 0 ? kUseDefaultNaN : kIgnoreDefaultNaN;
+ }
+
+ // Specify relevant register formats for Print(V)Register and related helpers.
+ enum PrintRegisterFormat {
+ // The lane size.
+ kPrintRegLaneSizeB = 0 << 0,
+ kPrintRegLaneSizeH = 1 << 0,
+ kPrintRegLaneSizeS = 2 << 0,
+ kPrintRegLaneSizeW = kPrintRegLaneSizeS,
+ kPrintRegLaneSizeD = 3 << 0,
+ kPrintRegLaneSizeX = kPrintRegLaneSizeD,
+ kPrintRegLaneSizeQ = 4 << 0,
+
+ kPrintRegLaneSizeOffset = 0,
+ kPrintRegLaneSizeMask = 7 << 0,
+
+ // The lane count.
+ kPrintRegAsScalar = 0,
+ kPrintRegAsDVector = 1 << 3,
+ kPrintRegAsQVector = 2 << 3,
+
+ kPrintRegAsVectorMask = 3 << 3,
+
+ // Indicate floating-point format lanes. (This flag is only supported for S-
+ // and D-sized lanes.)
+ kPrintRegAsFP = 1 << 5,
+
+ // Supported combinations.
+
+ kPrintXReg = kPrintRegLaneSizeX | kPrintRegAsScalar,
+ kPrintWReg = kPrintRegLaneSizeW | kPrintRegAsScalar,
+ kPrintSReg = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintDReg = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+
+ kPrintReg1B = kPrintRegLaneSizeB | kPrintRegAsScalar,
+ kPrintReg8B = kPrintRegLaneSizeB | kPrintRegAsDVector,
+ kPrintReg16B = kPrintRegLaneSizeB | kPrintRegAsQVector,
+ kPrintReg1H = kPrintRegLaneSizeH | kPrintRegAsScalar,
+ kPrintReg4H = kPrintRegLaneSizeH | kPrintRegAsDVector,
+ kPrintReg8H = kPrintRegLaneSizeH | kPrintRegAsQVector,
+ kPrintReg1S = kPrintRegLaneSizeS | kPrintRegAsScalar,
+ kPrintReg2S = kPrintRegLaneSizeS | kPrintRegAsDVector,
+ kPrintReg4S = kPrintRegLaneSizeS | kPrintRegAsQVector,
+ kPrintReg1SFP = kPrintRegLaneSizeS | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2SFP = kPrintRegLaneSizeS | kPrintRegAsDVector | kPrintRegAsFP,
+ kPrintReg4SFP = kPrintRegLaneSizeS | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1D = kPrintRegLaneSizeD | kPrintRegAsScalar,
+ kPrintReg2D = kPrintRegLaneSizeD | kPrintRegAsQVector,
+ kPrintReg1DFP = kPrintRegLaneSizeD | kPrintRegAsScalar | kPrintRegAsFP,
+ kPrintReg2DFP = kPrintRegLaneSizeD | kPrintRegAsQVector | kPrintRegAsFP,
+ kPrintReg1Q = kPrintRegLaneSizeQ | kPrintRegAsScalar
+ };
+
+ unsigned GetPrintRegLaneSizeInBytesLog2(PrintRegisterFormat format) {
+ return (format & kPrintRegLaneSizeMask) >> kPrintRegLaneSizeOffset;
+ }
+
+ unsigned GetPrintRegLaneSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytesLog2(PrintRegisterFormat format) {
+ if (format & kPrintRegAsDVector) return kDRegSizeInBytesLog2;
+ if (format & kPrintRegAsQVector) return kQRegSizeInBytesLog2;
+
+ // Scalar types.
+ return GetPrintRegLaneSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegSizeInBytes(PrintRegisterFormat format) {
+ return 1 << GetPrintRegSizeInBytesLog2(format);
+ }
+
+ unsigned GetPrintRegLaneCount(PrintRegisterFormat format) {
+ unsigned reg_size_log2 = GetPrintRegSizeInBytesLog2(format);
+ unsigned lane_size_log2 = GetPrintRegLaneSizeInBytesLog2(format);
+ VIXL_ASSERT(reg_size_log2 >= lane_size_log2);
+ return 1 << (reg_size_log2 - lane_size_log2);
+ }
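+
+ // Worked example (illustrative): kPrintReg4S combines kPrintRegLaneSizeS and
+ // kPrintRegAsQVector, so the lane size is 4 bytes, the register size is 16
+ // bytes, and GetPrintRegLaneCount() reports 4 lanes.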
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned reg_size,
+ unsigned lane_size);
+
+ PrintRegisterFormat GetPrintRegisterFormatForSize(unsigned size) {
+ return GetPrintRegisterFormatForSize(size, size);
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatForSizeFP(unsigned size) {
+ switch (size) {
+ default: VIXL_UNREACHABLE(); return kPrintDReg;
+ case kDRegSizeInBytes: return kPrintDReg;
+ case kSRegSizeInBytes: return kPrintSReg;
+ }
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormatTryFP(PrintRegisterFormat format) {
+ if ((GetPrintRegLaneSizeInBytes(format) == kSRegSizeInBytes) ||
+ (GetPrintRegLaneSizeInBytes(format) == kDRegSizeInBytes)) {
+ return static_cast<PrintRegisterFormat>(format | kPrintRegAsFP);
+ }
+ return format;
+ }
+
+ template<typename T>
+ PrintRegisterFormat GetPrintRegisterFormat(T value) {
+ return GetPrintRegisterFormatForSize(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(double value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kDRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(float value) {
+ VIXL_STATIC_ASSERT(sizeof(value) == kSRegSizeInBytes);
+ return GetPrintRegisterFormatForSizeFP(sizeof(value));
+ }
+
+ PrintRegisterFormat GetPrintRegisterFormat(VectorFormat vform);
+
+ // Print all registers of the specified types.
+ void PrintRegisters();
+ void PrintVRegisters();
+ void PrintSystemRegisters();
+
+ // As above, but only print the registers that have been updated.
+ void PrintWrittenRegisters();
+ void PrintWrittenVRegisters();
+
+ // As above, but respect LOG_REGS and LOG_VREGS.
+ void LogWrittenRegisters() {
+ if (trace_parameters() & LOG_REGS) PrintWrittenRegisters();
+ }
+ void LogWrittenVRegisters() {
+ if (trace_parameters() & LOG_VREGS) PrintWrittenVRegisters();
+ }
+ void LogAllWrittenRegisters() {
+ LogWrittenRegisters();
+ LogWrittenVRegisters();
+ }
+
+ // Print individual register values (after update).
+ void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+ void PrintVRegister(unsigned code, PrintRegisterFormat format);
+ void PrintSystemRegister(SystemRegister id);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+ if (trace_parameters() & LOG_REGS) PrintRegister(code, r31mode);
+ }
+ void LogVRegister(unsigned code, PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_VREGS) PrintVRegister(code, format);
+ }
+ void LogSystemRegister(SystemRegister id) {
+ if (trace_parameters() & LOG_SYSREGS) PrintSystemRegister(id);
+ }
+
+ // Print memory accesses.
+ void PrintRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format);
+ void PrintVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+ void PrintVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane);
+
+ // Like Print* (above), but respect trace_parameters().
+ void LogRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_REGS) PrintRead(address, reg_code, format);
+ }
+ void LogWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format) {
+ if (trace_parameters() & LOG_WRITE) PrintWrite(address, reg_code, format);
+ }
+ void LogVRead(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_VREGS) {
+ PrintVRead(address, reg_code, format, lane);
+ }
+ }
+ void LogVWrite(uintptr_t address, unsigned reg_code,
+ PrintRegisterFormat format, unsigned lane = 0) {
+ if (trace_parameters() & LOG_WRITE) {
+ PrintVWrite(address, reg_code, format, lane);
+ }
+ }
+
+ // Helper functions for register tracing.
+ void PrintRegisterRawHelper(unsigned code, Reg31Mode r31mode,
+ int size_in_bytes = kXRegSizeInBytes);
+ void PrintVRegisterRawHelper(unsigned code, int bytes = kQRegSizeInBytes,
+ int lsb = 0);
+ void PrintVRegisterFPHelper(unsigned code, unsigned lane_size_in_bytes,
+ int lane_count = 1, int rightmost_lane = 0);
+
+ void DoUnreachable(const Instruction* instr);
+ void DoTrace(const Instruction* instr);
+ void DoLog(const Instruction* instr);
+
+ static const char* WRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* XRegNameForCode(unsigned code,
+ Reg31Mode mode = Reg31IsZeroRegister);
+ static const char* SRegNameForCode(unsigned code);
+ static const char* DRegNameForCode(unsigned code);
+ static const char* VRegNameForCode(unsigned code);
+
+ bool coloured_trace() const { return coloured_trace_; }
+ void set_coloured_trace(bool value);
+
+ int trace_parameters() const { return trace_parameters_; }
+ void set_trace_parameters(int parameters);
+
+ void set_instruction_stats(bool value);
+
+ // Clear the simulated local monitor to force the next store-exclusive
+ // instruction to fail.
+ void ClearLocalMonitor() {
+ local_monitor_.Clear();
+ }
+
+ void SilenceExclusiveAccessWarning() {
+ print_exclusive_access_warning_ = false;
+ }
+
+ protected:
+ const char* clr_normal;
+ const char* clr_flag_name;
+ const char* clr_flag_value;
+ const char* clr_reg_name;
+ const char* clr_reg_value;
+ const char* clr_vreg_name;
+ const char* clr_vreg_value;
+ const char* clr_memory_address;
+ const char* clr_warning;
+ const char* clr_warning_message;
+ const char* clr_printf;
+
+ // Simulation helpers ------------------------------------
+ bool ConditionPassed(Condition cond) {
+ switch (cond) {
+ case eq:
+ return Z();
+ case ne:
+ return !Z();
+ case hs:
+ return C();
+ case lo:
+ return !C();
+ case mi:
+ return N();
+ case pl:
+ return !N();
+ case vs:
+ return V();
+ case vc:
+ return !V();
+ case hi:
+ return C() && !Z();
+ case ls:
+ return !(C() && !Z());
+ case ge:
+ return N() == V();
+ case lt:
+ return N() != V();
+ case gt:
+ return !Z() && (N() == V());
+ case le:
+ return !(!Z() && (N() == V()));
+ case nv:
+ VIXL_FALLTHROUGH();
+ case al:
+ return true;
+ default:
+ VIXL_UNREACHABLE();
+ return false;
+ }
+ }
+
+ bool ConditionPassed(Instr cond) {
+ return ConditionPassed(static_cast<Condition>(cond));
+ }
+
+ bool ConditionFailed(Condition cond) {
+ return !ConditionPassed(cond);
+ }
+
+ void AddSubHelper(const Instruction* instr, int64_t op2);
+ uint64_t AddWithCarry(unsigned reg_size,
+ bool set_flags,
+ uint64_t left,
+ uint64_t right,
+ int carry_in = 0);
+ void LogicalHelper(const Instruction* instr, int64_t op2);
+ void ConditionalCompareHelper(const Instruction* instr, int64_t op2);
+ void LoadStoreHelper(const Instruction* instr,
+ int64_t offset,
+ AddrMode addrmode);
+ void LoadStorePairHelper(const Instruction* instr, AddrMode addrmode);
+ uintptr_t AddressModeHelper(unsigned addr_reg,
+ int64_t offset,
+ AddrMode addrmode);
+ void NEONLoadStoreMultiStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+ void NEONLoadStoreSingleStructHelper(const Instruction* instr,
+ AddrMode addr_mode);
+
+ uint64_t AddressUntag(uint64_t address) {
+ return address & ~kAddressTagMask;
+ }
+
+ template <typename T>
+ T* AddressUntag(T* address) {
+ uintptr_t address_raw = reinterpret_cast<uintptr_t>(address);
+ return reinterpret_cast<T*>(AddressUntag(address_raw));
+ }
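+
+ // Example (illustrative): kAddressTagMask covers the top-byte address tag,
+ // so AddressUntag(0xab00007fd3e9c0f0) yields 0x0000007fd3e9c0f0.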
+
+ int64_t ShiftOperand(unsigned reg_size,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t Rotate(unsigned reg_width,
+ int64_t value,
+ Shift shift_type,
+ unsigned amount);
+ int64_t ExtendValue(unsigned reg_width,
+ int64_t value,
+ Extend extend_type,
+ unsigned left_shift = 0);
+ uint16_t PolynomialMult(uint8_t op1, uint8_t op2);
+
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld1(VectorFormat vform,
+ LogicVRegister dst,
+ int index,
+ uint64_t addr);
+ void ld1r(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld2(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ int index,
+ uint64_t addr);
+ void ld2r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld3(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ int index,
+ uint64_t addr);
+ void ld3r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void ld4(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ int index,
+ uint64_t addr);
+ void ld4r(VectorFormat vform,
+ LogicVRegister dst1,
+ LogicVRegister dst2,
+ LogicVRegister dst3,
+ LogicVRegister dst4,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ uint64_t addr);
+ void st1(VectorFormat vform,
+ LogicVRegister src,
+ int index,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ uint64_t addr);
+ void st2(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ int index,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ uint64_t addr);
+ void st3(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ int index,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ uint64_t addr);
+ void st4(VectorFormat vform,
+ LogicVRegister src,
+ LogicVRegister src2,
+ LogicVRegister src3,
+ LogicVRegister src4,
+ int index,
+ uint64_t addr);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister cmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ int imm,
+ Condition cond);
+ LogicVRegister cmptst(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister add(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister mul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister mls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister pmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ typedef LogicVRegister (Simulator::*ByElementOp)(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister fmulx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister smlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister umlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmull2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlal2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmlsl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister sub(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister and_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister orn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister eor(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bic(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister bif(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister bsl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister cls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister clz(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister cnt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister not_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rbit(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int revSize);
+ LogicVRegister rev16(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev32(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister rev64(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool is_signed,
+ bool do_accumulate);
+ LogicVRegister saddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uadalp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ext(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ int index);
+ LogicVRegister ins_element(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister ins_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ uint64_t imm);
+ LogicVRegister dup_element(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int src_index);
+ LogicVRegister dup_immediate(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister mov(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister movi(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister mvni(VectorFormat vform,
+ LogicVRegister dst,
+ uint64_t imm);
+ LogicVRegister orr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ uint64_t imm);
+ LogicVRegister sshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ushl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister smax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister smin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister sminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister addp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister addv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uaddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister saddlv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister smaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sxtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& ind);
+ LogicVRegister tbx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& tab,
+ const LogicVRegister& tab2,
+ const LogicVRegister& tab3,
+ const LogicVRegister& tab4,
+ const LogicVRegister& ind);
+ LogicVRegister uaddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister saddw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister usubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister ssubw2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool max);
+ LogicVRegister umax(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister umin(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxp(VectorFormat vform,
+ LogicVRegister dst,
+ int dst_index,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ bool max);
+ LogicVRegister umaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister trn1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister trn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister zip2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp1(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uzp2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister scvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister ucvtf(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int fbits,
+ FPRounding rounding_mode);
+ LogicVRegister sshll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister shll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister ushll(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushll2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sli(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sri(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sshr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ushr(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ssra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister usra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister srsra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister ursra(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister suqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister usqadd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshlu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister abs(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister neg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister extractnarrow(VectorFormat vform,
+ LogicVRegister dst,
+ bool dstIsSigned,
+ const LogicVRegister& src,
+ bool srcIsSigned);
+ LogicVRegister xtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister uqxtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister sqxtun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister absdiff(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool issigned);
+ LogicVRegister saba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister uaba(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister shrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister shrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister rshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister uqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrshrun2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ int shift);
+ LogicVRegister sqrdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ bool round = true);
+ LogicVRegister sqdmulh(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ #define NEON_3VREG_LOGIC_LIST(V) \
+ V(addhn) \
+ V(addhn2) \
+ V(raddhn) \
+ V(raddhn2) \
+ V(subhn) \
+ V(subhn2) \
+ V(rsubhn) \
+ V(rsubhn2) \
+ V(pmull) \
+ V(pmull2) \
+ V(sabal) \
+ V(sabal2) \
+ V(uabal) \
+ V(uabal2) \
+ V(sabdl) \
+ V(sabdl2) \
+ V(uabdl) \
+ V(uabdl2) \
+ V(smull) \
+ V(smull2) \
+ V(umull) \
+ V(umull2) \
+ V(smlal) \
+ V(smlal2) \
+ V(umlal) \
+ V(umlal2) \
+ V(smlsl) \
+ V(smlsl2) \
+ V(umlsl) \
+ V(umlsl2) \
+ V(sqdmlal) \
+ V(sqdmlal2) \
+ V(sqdmlsl) \
+ V(sqdmlsl2) \
+ V(sqdmull) \
+ V(sqdmull2)
+
+ #define DEFINE_LOGIC_FUNC(FXN) \
+ LogicVRegister FXN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_3VREG_LOGIC_LIST(DEFINE_LOGIC_FUNC)
+ #undef DEFINE_LOGIC_FUNC
+
+ #define NEON_FP3SAME_LIST(V) \
+ V(fadd, FPAdd, false) \
+ V(fsub, FPSub, true) \
+ V(fmul, FPMul, true) \
+ V(fmulx, FPMulx, true) \
+ V(fdiv, FPDiv, true) \
+ V(fmax, FPMax, false) \
+ V(fmin, FPMin, false) \
+ V(fmaxnm, FPMaxNM, false) \
+ V(fminnm, FPMinNM, false)
+
+ #define DECLARE_NEON_FP_VECTOR_OP(FN, OP, PROCNAN) \
+ template <typename T> \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FN(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2);
+ NEON_FP3SAME_LIST(DECLARE_NEON_FP_VECTOR_OP)
+ #undef DECLARE_NEON_FP_VECTOR_OP
+
+ #define NEON_FPPAIRWISE_LIST(V) \
+ V(faddp, fadd, FPAdd) \
+ V(fmaxp, fmax, FPMax) \
+ V(fmaxnmp, fmaxnm, FPMaxNM) \
+ V(fminp, fmin, FPMin) \
+ V(fminnmp, fminnm, FPMinNM)
+
+ #define DECLARE_NEON_FP_PAIR_OP(FNP, FN, OP) \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src1, \
+ const LogicVRegister& src2); \
+ LogicVRegister FNP(VectorFormat vform, \
+ LogicVRegister dst, \
+ const LogicVRegister& src);
+ NEON_FPPAIRWISE_LIST(DECLARE_NEON_FP_PAIR_OP)
+ #undef DECLARE_NEON_FP_PAIR_OP
+
+ template <typename T>
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frecps(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frsqrts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmla(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ template <typename T>
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fmls(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister fnmul(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+
+ template <typename T>
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fabscmp(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2,
+ Condition cond);
+ LogicVRegister fcmp_zero(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ Condition cond);
+
+ template <typename T>
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fneg(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpx(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ template <typename T>
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabs_(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fabd(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src1,
+ const LogicVRegister& src2);
+ LogicVRegister frint(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ bool inexact_exception = false);
+ LogicVRegister fcvts(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtu(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding_mode,
+ int fbits = 0);
+ LogicVRegister fcvtl(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtl2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fcvtxn2(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fsqrt(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frsqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister frecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPRounding rounding);
+ LogicVRegister ursqrte(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister urecpe(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
+ typedef float (Simulator::*FPMinMaxOp)(float a, float b);
+
+ LogicVRegister fminmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src,
+ FPMinMaxOp Op);
+
+ LogicVRegister fminv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fminnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+ LogicVRegister fmaxnmv(VectorFormat vform,
+ LogicVRegister dst,
+ const LogicVRegister& src);
+
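+ // CRC32_POLY is the generator polynomial used by the CRC32B/H/W/X
+ // instructions; CRC32C_POLY is the Castagnoli polynomial used by the
+ // CRC32C* variants.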
+ static const uint32_t CRC32_POLY = 0x04C11DB7;
+ static const uint32_t CRC32C_POLY = 0x1EDC6F41;
+ uint32_t Poly32Mod2(unsigned n, uint64_t data, uint32_t poly);
+ template <typename T>
+ uint32_t Crc32Checksum(uint32_t acc, T val, uint32_t poly);
+ uint32_t Crc32Checksum(uint32_t acc, uint64_t val, uint32_t poly);
+
+ void SysOp_W(int op, int64_t val);
+
+ template <typename T>
+ T FPRecipSqrtEstimate(T op);
+ template <typename T>
+ T FPRecipEstimate(T op, FPRounding rounding);
+ template <typename T, typename R>
+ R FPToFixed(T op, int fbits, bool is_signed, FPRounding rounding);
+
+ void FPCompare(double val0, double val1, FPTrapFlags trap);
+ double FPRoundInt(double value, FPRounding round_mode);
+ double recip_sqrt_estimate(double a);
+ double recip_estimate(double a);
+ double FPRecipSqrtEstimate(double a);
+ double FPRecipEstimate(double a);
+ double FixedToDouble(int64_t src, int fbits, FPRounding round_mode);
+ double UFixedToDouble(uint64_t src, int fbits, FPRounding round_mode);
+ float FixedToFloat(int64_t src, int fbits, FPRounding round_mode);
+ float UFixedToFloat(uint64_t src, int fbits, FPRounding round_mode);
+ int32_t FPToInt32(double value, FPRounding rmode);
+ int64_t FPToInt64(double value, FPRounding rmode);
+ uint32_t FPToUInt32(double value, FPRounding rmode);
+ uint64_t FPToUInt64(double value, FPRounding rmode);
+ int32_t FPToFixedJS(double value);
+
+ template <typename T>
+ T FPAdd(T op1, T op2);
+
+ template <typename T>
+ T FPDiv(T op1, T op2);
+
+ template <typename T>
+ T FPMax(T a, T b);
+
+ template <typename T>
+ T FPMaxNM(T a, T b);
+
+ template <typename T>
+ T FPMin(T a, T b);
+
+ template <typename T>
+ T FPMinNM(T a, T b);
+
+ template <typename T>
+ T FPMul(T op1, T op2);
+
+ template <typename T>
+ T FPMulx(T op1, T op2);
+
+ template <typename T>
+ T FPMulAdd(T a, T op1, T op2);
+
+ template <typename T>
+ T FPSqrt(T op);
+
+ template <typename T>
+ T FPSub(T op1, T op2);
+
+ template <typename T>
+ T FPRecipStepFused(T op1, T op2);
+
+ template <typename T>
+ T FPRSqrtStepFused(T op1, T op2);
+
+ // This doesn't do anything at the moment. We'll need it if we want support
+ // for cumulative exception bits or floating-point exceptions.
+ void FPProcessException() { }
+
+ bool FPProcessNaNs(const Instruction* instr);
+
+ // Pseudo Printf instruction
+ void DoPrintf(const Instruction* instr);
+
+ // Processor state ---------------------------------------
+
+ // Simulated monitors for exclusive access instructions.
+ SimExclusiveLocalMonitor local_monitor_;
+ SimExclusiveGlobalMonitor global_monitor_;
+
+ // Output stream.
+ FILE* stream_;
+ PrintDisassembler* print_disasm_;
+
+ // Instruction statistics instrumentation.
+ Instrument* instrumentation_;
+
+ // General purpose registers. Register 31 is the stack pointer.
+ SimRegister registers_[kNumberOfRegisters];
+
+ // Vector registers
+ SimVRegister vregisters_[kNumberOfVRegisters];
+
+ // Program Status Register.
+ // bits[31:28]: Condition flags N, Z, C, and V.
+ // (Negative, Zero, Carry, Overflow)
+ SimSystemRegister nzcv_;
+
+ // Floating-Point Control Register
+ SimSystemRegister fpcr_;
+
+ // Only a subset of FPCR features is supported by the simulator. This helper
+ // checks that the FPCR settings are supported.
+ //
+ // This is checked when floating-point instructions are executed, not when
+ // FPCR is set. This allows generated code to modify FPCR for external
+ // functions, or to save and restore it when entering and leaving generated
+ // code.
+ void AssertSupportedFPCR() {
+ VIXL_ASSERT(fpcr().FZ() == 0); // No flush-to-zero support.
+ VIXL_ASSERT(fpcr().RMode() == FPTieEven); // Ties-to-even rounding only.
+
+ // The simulator does not support half-precision operations so fpcr().AHP()
+ // is irrelevant, and is not checked here.
+ }
+
+ static int CalcNFlag(uint64_t result, unsigned reg_size) {
+ return (result >> (reg_size - 1)) & 1;
+ }
+
+ static int CalcZFlag(uint64_t result) {
+ return (result == 0) ? 1 : 0;
+ }
+
+ static const uint32_t kConditionFlagsMask = 0xf0000000;
+
+ // Stack
+ byte* stack_;
+ static const int stack_protection_size_ = 512 * KBytes;
+ static const int stack_size_ = (2 * MBytes) + (2 * stack_protection_size_);
+ byte* stack_limit_;
+
+ Decoder* decoder_;
+ // Indicates if the pc has been modified by the instruction and should not be
+ // automatically incremented.
+ bool pc_modified_;
+ const Instruction* pc_;
+
+ static const char* xreg_names[];
+ static const char* wreg_names[];
+ static const char* sreg_names[];
+ static const char* dreg_names[];
+ static const char* vreg_names[];
+
+ static const Instruction* kEndOfSimAddress;
+
+ private:
+ template <typename T>
+ static T FPDefaultNaN();
+
+ // Standard NaN processing.
+ template <typename T>
+ T FPProcessNaN(T op) {
+ VIXL_ASSERT(std::isnan(op));
+ if (IsSignallingNaN(op)) {
+ FPProcessException();
+ }
+ return DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
+ }
+
+ template <typename T>
+ T FPProcessNaNs(T op1, T op2) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else {
+ return 0.0;
+ }
+ }
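+
+ // Illustrative note: signalling NaNs take priority over quiet NaNs, and
+ // earlier operands over later ones, so FPProcessNaNs(quiet, signalling)
+ // returns the quieted second operand. A result of 0.0 means neither operand
+ // was a NaN and the caller should compute the real result.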
+
+ template <typename T>
+ T FPProcessNaNs3(T op1, T op2, T op3) {
+ if (IsSignallingNaN(op1)) {
+ return FPProcessNaN(op1);
+ } else if (IsSignallingNaN(op2)) {
+ return FPProcessNaN(op2);
+ } else if (IsSignallingNaN(op3)) {
+ return FPProcessNaN(op3);
+ } else if (std::isnan(op1)) {
+ VIXL_ASSERT(IsQuietNaN(op1));
+ return FPProcessNaN(op1);
+ } else if (std::isnan(op2)) {
+ VIXL_ASSERT(IsQuietNaN(op2));
+ return FPProcessNaN(op2);
+ } else if (std::isnan(op3)) {
+ VIXL_ASSERT(IsQuietNaN(op3));
+ return FPProcessNaN(op3);
+ } else {
+ return 0.0;
+ }
+ }
+
+ bool coloured_trace_;
+
+ // A set of TraceParameters flags.
+ int trace_parameters_;
+
+ // Indicates whether the instruction instrumentation is active.
+ bool instruction_stats_;
+
+ // Indicates whether the exclusive-access warning has been printed.
+ bool print_exclusive_access_warning_;
+ void PrintExclusiveAccessWarning();
+
+ // Indicates that the simulator ran out of memory at some point.
+ // Data structures may not be fully allocated.
+ bool oom_;
+
+ public:
+ // True if the simulator ran out of memory during or after construction.
+ bool oom() const { return oom_; }
+
+ protected:
+ mozilla::Vector<int64_t, 0, js::SystemAllocPolicy> spStack_;
+};
+
+} // namespace vixl
+
+namespace js {
+namespace jit {
+
+class SimulatorProcess
+{
+ public:
+ static SimulatorProcess* singleton_;
+
+ SimulatorProcess()
+ : lock_(mutexid::Arm64SimulatorLock)
+ , redirection_(nullptr)
+ {}
+
+ // Synchronizes access between main thread and compilation threads.
+ js::Mutex lock_ MOZ_UNANNOTATED;
+ vixl::Redirection* redirection_;
+
+#ifdef JS_CACHE_SIMULATOR_ARM64
+ // For each simulator, record the instruction ranges that other threads
+ // have registered as invalidated.
+ struct ICacheFlush {
+ void* start;
+ size_t length;
+ };
+ using ICacheFlushes = mozilla::Vector<ICacheFlush, 2>;
+ struct SimFlushes {
+ vixl::Simulator* thread;
+ ICacheFlushes records;
+ };
+ mozilla::Vector<SimFlushes, 1> pendingFlushes_;
+
+ static void recordICacheFlush(void* start, size_t length);
+ static void membarrier();
+ static ICacheFlushes& getICacheFlushes(vixl::Simulator* sim);
+ [[nodiscard]] static bool registerSimulator(vixl::Simulator* sim);
+ static void unregisterSimulator(vixl::Simulator* sim);
+#endif
+
+ static void setRedirection(vixl::Redirection* redirection) {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ singleton_->redirection_ = redirection;
+ }
+
+ static vixl::Redirection* redirection() {
+ singleton_->lock_.assertOwnedByCurrentThread();
+ return singleton_->redirection_;
+ }
+
+ static bool initialize() {
+ singleton_ = js_new<SimulatorProcess>();
+ return !!singleton_;
+ }
+ static void destroy() {
+ js_delete(singleton_);
+ singleton_ = nullptr;
+ }
+};
+
+// Protects the icache and redirection properties of the simulator.
+class AutoLockSimulatorCache : public js::LockGuard<js::Mutex>
+{
+ using Base = js::LockGuard<js::Mutex>;
+
+ public:
+ explicit AutoLockSimulatorCache()
+ : Base(SimulatorProcess::singleton_->lock_)
+ {
+ }
+};
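+
+// Illustrative usage (a sketch; the actual call sites live elsewhere in the
+// port): take the lock before touching the shared redirection list, e.g.
+//
+//   {
+//     AutoLockSimulatorCache lock;
+//     vixl::Redirection* head = SimulatorProcess::redirection();
+//     // ... walk the list, or install a new entry via
+//     // SimulatorProcess::setRedirection(...) ...
+//   }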
+
+} // namespace jit
+} // namespace js
+
+#endif // JS_SIMULATOR_ARM64
+#endif // VIXL_A64_SIMULATOR_A64_H_
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.cpp b/js/src/jit/arm64/vixl/Utils-vixl.cpp
new file mode 100644
index 0000000000..381c3501d1
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.cpp
@@ -0,0 +1,555 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "jit/arm64/vixl/Utils-vixl.h"
+
+#include <cstdio>
+
+namespace vixl {
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = RawbitsToDouble(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = RawbitsToFloat(0x7fc00000);
+const Float16 kFP16DefaultNaN = RawbitsToFloat16(0x7e00);
+
+// Floating-point zero values.
+const Float16 kFP16PositiveZero = RawbitsToFloat16(0x0);
+const Float16 kFP16NegativeZero = RawbitsToFloat16(0x8000);
+
+// Floating-point infinity values.
+const Float16 kFP16PositiveInfinity = RawbitsToFloat16(0x7c00);
+const Float16 kFP16NegativeInfinity = RawbitsToFloat16(0xfc00);
+const float kFP32PositiveInfinity = RawbitsToFloat(0x7f800000);
+const float kFP32NegativeInfinity = RawbitsToFloat(0xff800000);
+const double kFP64PositiveInfinity =
+ RawbitsToDouble(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+ RawbitsToDouble(UINT64_C(0xfff0000000000000));
+
+bool IsZero(Float16 value) {
+ uint16_t bits = Float16ToRawbits(value);
+ return (bits == Float16ToRawbits(kFP16PositiveZero) ||
+ bits == Float16ToRawbits(kFP16NegativeZero));
+}
+
+uint16_t Float16ToRawbits(Float16 value) { return value.rawbits_; }
+
+uint32_t FloatToRawbits(float value) {
+ uint32_t bits = 0;
+ memcpy(&bits, &value, 4);
+ return bits;
+}
+
+
+uint64_t DoubleToRawbits(double value) {
+ uint64_t bits = 0;
+ memcpy(&bits, &value, 8);
+ return bits;
+}
+
+
+Float16 RawbitsToFloat16(uint16_t bits) {
+ Float16 f;
+ f.rawbits_ = bits;
+ return f;
+}
+
+
+float RawbitsToFloat(uint32_t bits) {
+ float value = 0.0;
+ memcpy(&value, &bits, 4);
+ return value;
+}
+
+
+double RawbitsToDouble(uint64_t bits) {
+ double value = 0.0;
+ memcpy(&value, &bits, 8);
+ return value;
+}
+
+
+uint32_t Float16Sign(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(15, 15, rawbits);
+}
+
+
+uint32_t Float16Exp(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(14, 10, rawbits);
+}
+
+uint32_t Float16Mantissa(internal::SimFloat16 val) {
+ uint16_t rawbits = Float16ToRawbits(val);
+ return ExtractUnsignedBitfield32(9, 0, rawbits);
+}
+
+
+uint32_t FloatSign(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(31, 31, rawbits);
+}
+
+
+uint32_t FloatExp(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(30, 23, rawbits);
+}
+
+
+uint32_t FloatMantissa(float val) {
+ uint32_t rawbits = FloatToRawbits(val);
+ return ExtractUnsignedBitfield32(22, 0, rawbits);
+}
+
+
+uint32_t DoubleSign(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return static_cast<uint32_t>(ExtractUnsignedBitfield64(63, 63, rawbits));
+}
+
+
+uint32_t DoubleExp(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return static_cast<uint32_t>(ExtractUnsignedBitfield64(62, 52, rawbits));
+}
+
+
+uint64_t DoubleMantissa(double val) {
+ uint64_t rawbits = DoubleToRawbits(val);
+ return ExtractUnsignedBitfield64(51, 0, rawbits);
+}
+
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+ uint16_t exp,
+ uint16_t mantissa) {
+ uint16_t bits = (sign << 15) | (exp << 10) | mantissa;
+ return RawbitsToFloat16(bits);
+}
+
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa) {
+ uint32_t bits = (sign << 31) | (exp << 23) | mantissa;
+ return RawbitsToFloat(bits);
+}
+
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa) {
+ uint64_t bits = (sign << 63) | (exp << 52) | mantissa;
+ return RawbitsToDouble(bits);
+}
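+
+
+// For example (illustrative only), packing raw IEEE-754 fields reproduces the
+// expected encodings:
+//   FloatPack(0, 127, 0);      // rawbits 0x3f800000, i.e. 1.0f
+//   DoublePack(1, 1023, 0);    // rawbits 0xbff0000000000000, i.e. -1.0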
+
+
+int Float16Classify(Float16 value) {
+ uint16_t bits = Float16ToRawbits(value);
+ uint16_t exponent_max = (1 << 5) - 1;
+ uint16_t exponent_mask = exponent_max << 10;
+ uint16_t mantissa_mask = (1 << 10) - 1;
+
+ uint16_t exponent = (bits & exponent_mask) >> 10;
+ uint16_t mantissa = bits & mantissa_mask;
+ if (exponent == 0) {
+ if (mantissa == 0) {
+ return FP_ZERO;
+ }
+ return FP_SUBNORMAL;
+ } else if (exponent == exponent_max) {
+ if (mantissa == 0) {
+ return FP_INFINITE;
+ }
+ return FP_NAN;
+ }
+ return FP_NORMAL;
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
+ VIXL_ASSERT((reg_size % 8) == 0);
+ int count = 0;
+ for (unsigned i = 0; i < (reg_size / 16); i++) {
+ if ((imm & 0xffff) == 0) {
+ count++;
+ }
+ imm >>= 16;
+ }
+ return count;
+}
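+
+
+// For example (illustrative only), a 64-bit immediate with a single non-zero
+// halfword has three clear halfwords:
+//   CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64);   // == 3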
+
+
+int BitCount(uint64_t value) { return CountSetBits(value); }
+
+// Float16 definitions.
+
+Float16::Float16(double dvalue) {
+ rawbits_ =
+ Float16ToRawbits(FPToFloat16(dvalue, FPTieEven, kIgnoreDefaultNaN));
+}
+
+namespace internal {
+
+SimFloat16 SimFloat16::operator-() const {
+ return RawbitsToFloat16(rawbits_ ^ 0x8000);
+}
+
+// SimFloat16 definitions.
+SimFloat16 SimFloat16::operator+(SimFloat16 rhs) const {
+ return static_cast<double>(*this) + static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator-(SimFloat16 rhs) const {
+ return static_cast<double>(*this) - static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator*(SimFloat16 rhs) const {
+ return static_cast<double>(*this) * static_cast<double>(rhs);
+}
+
+SimFloat16 SimFloat16::operator/(SimFloat16 rhs) const {
+ return static_cast<double>(*this) / static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator<(SimFloat16 rhs) const {
+ return static_cast<double>(*this) < static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator>(SimFloat16 rhs) const {
+ return static_cast<double>(*this) > static_cast<double>(rhs);
+}
+
+bool SimFloat16::operator==(SimFloat16 rhs) const {
+ if (IsNaN(*this) || IsNaN(rhs)) {
+ return false;
+ } else if (IsZero(rhs) && IsZero(*this)) {
+ // +0 and -0 should be treated as equal.
+ return true;
+ }
+ return this->rawbits_ == rhs.rawbits_;
+}
+
+bool SimFloat16::operator!=(SimFloat16 rhs) const { return !(*this == rhs); }
+
+bool SimFloat16::operator==(double rhs) const {
+ return static_cast<double>(*this) == static_cast<double>(rhs);
+}
+
+SimFloat16::operator double() const {
+ return FPToDouble(*this, kIgnoreDefaultNaN);
+}
+
+Int64 BitCount(Uint32 value) { return CountSetBits(value.Get()); }
+
+} // namespace internal
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception) {
+ uint16_t bits = Float16ToRawbits(value);
+ uint32_t sign = bits >> 15;
+ uint32_t exponent =
+ ExtractUnsignedBitfield32(kFloat16MantissaBits + kFloat16ExponentBits - 1,
+ kFloat16MantissaBits,
+ bits);
+ uint32_t mantissa =
+ ExtractUnsignedBitfield32(kFloat16MantissaBits - 1, 0, bits);
+
+ switch (Float16Classify(value)) {
+ case FP_ZERO:
+ return (sign == 0) ? 0.0f : -0.0f;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP32PositiveInfinity : kFP32NegativeInfinity;
+
+ case FP_SUBNORMAL: {
+ // Calculate shift required to put mantissa into the most-significant bits
+ // of the destination mantissa.
+ int shift = CountLeadingZeros(mantissa << (32 - 10));
+
+ // Shift mantissa and discard implicit '1'.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits) + shift + 1;
+ mantissa &= (1 << kFloatMantissaBits) - 1;
+
+ // Adjust the exponent for the shift applied, and rebias.
+ exponent = exponent - shift + (-15 + 127);
+ break;
+ }
+
+ case FP_NAN:
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ exponent = (1 << kFloatExponentBits) - 1;
+
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+ mantissa |= 1 << 22; // Force a quiet NaN.
+ break;
+
+ case FP_NORMAL:
+ // Increase bits in mantissa, making low-order bits 0.
+ mantissa <<= (kFloatMantissaBits - kFloat16MantissaBits);
+
+ // Change exponent bias.
+ exponent += (-15 + 127);
+ break;
+
+ default:
+ VIXL_UNREACHABLE();
+ }
+ return RawbitsToFloat((sign << 31) | (exponent << kFloatMantissaBits) |
+ mantissa);
+}
+
+
+float FPToFloat(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+  // Only the FPTieEven and FPRoundOdd rounding modes are implemented.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+ USE(round_mode);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP32DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint64_t raw = DoubleToRawbits(value);
+
+ uint32_t sign = raw >> 63;
+ uint32_t exponent = (1 << 8) - 1;
+ uint32_t payload =
+ static_cast<uint32_t>(ExtractUnsignedBitfield64(50, 52 - 23, raw));
+ payload |= (1 << 22); // Force a quiet NaN.
+
+ return RawbitsToFloat((sign << 31) | (exponent << 23) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_INFINITE: {
+ // In a C++ cast, any value representable in the target type will be
+ // unchanged. This is always the case for +/-0.0 and infinities.
+ return static_cast<float>(value);
+ }
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-float as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+ uint64_t raw = DoubleToRawbits(value);
+ // Extract the IEEE-754 double components.
+ uint32_t sign = raw >> 63;
+ // Extract the exponent and remove the IEEE-754 encoding bias.
+ int32_t exponent =
+ static_cast<int32_t>(ExtractUnsignedBitfield64(62, 52, raw)) - 1023;
+ // Extract the mantissa and add the implicit '1' bit.
+ uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+ if (std::fpclassify(value) == FP_NORMAL) {
+ mantissa |= (UINT64_C(1) << 52);
+ }
+ return FPRoundToFloat(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return value;
+}
+
+// TODO: We should consider implementing a full FPToDouble(Float16)
+// conversion function (for performance reasons).
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception) {
+ // We can rely on implicit float to double conversion here.
+ return FPToFloat(value, DN, exception);
+}
+
+
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception) {
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP64DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred entirely, except that the top
+ // bit is forced to '1', making the result a quiet NaN. The unused
+ // (low-order) payload bits are set to 0.
+ uint32_t raw = FloatToRawbits(value);
+
+ uint64_t sign = raw >> 31;
+ uint64_t exponent = (1 << 11) - 1;
+ uint64_t payload = ExtractUnsignedBitfield64(21, 0, raw);
+ payload <<= (52 - 23); // The unused low-order bits should be 0.
+ payload |= (UINT64_C(1) << 51); // Force a quiet NaN.
+
+ return RawbitsToDouble((sign << 63) | (exponent << 52) | payload);
+ }
+
+ case FP_ZERO:
+ case FP_NORMAL:
+ case FP_SUBNORMAL:
+ case FP_INFINITE: {
+ // All other inputs are preserved in a standard cast, because every value
+ // representable using an IEEE-754 float is also representable using an
+ // IEEE-754 double.
+ return static_cast<double>(value);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return static_cast<double>(value);
+}
+
+
+Float16 FPToFloat16(float value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint32_t raw = FloatToRawbits(value);
+ int32_t sign = raw >> 31;
+ int32_t exponent = ExtractUnsignedBitfield32(30, 23, raw) - 127;
+ uint32_t mantissa = ExtractUnsignedBitfield32(22, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+ : Float16ToRawbits(kFP16NegativeInfinity);
+ result |= mantissa >> (kFloatMantissaBits - kFloat16MantissaBits);
+ result |= (1 << 9); // Force a quiet NaN;
+ return RawbitsToFloat16(result);
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert float-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (1 << 23);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return kFP16PositiveZero;
+}
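+
+
+// For example (illustrative only), converting 1.0f to half precision with
+// ties-to-even rounding yields the FP16 encoding of 1.0:
+//   Float16ToRawbits(FPToFloat16(1.0f, FPTieEven, kIgnoreDefaultNaN));
+//   // == 0x3c00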
+
+
+Float16 FPToFloat16(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception) {
+ // Only the FPTieEven rounding mode is implemented.
+ VIXL_ASSERT(round_mode == FPTieEven);
+ USE(round_mode);
+
+ uint64_t raw = DoubleToRawbits(value);
+ int32_t sign = raw >> 63;
+ int64_t exponent = ExtractUnsignedBitfield64(62, 52, raw) - 1023;
+ uint64_t mantissa = ExtractUnsignedBitfield64(51, 0, raw);
+
+ switch (std::fpclassify(value)) {
+ case FP_NAN: {
+ if (IsSignallingNaN(value)) {
+ if (exception != NULL) {
+ *exception = true;
+ }
+ }
+ if (DN == kUseDefaultNaN) return kFP16DefaultNaN;
+
+ // Convert NaNs as the processor would:
+ // - The sign is propagated.
+ // - The payload (mantissa) is transferred as much as possible, except
+ // that the top bit is forced to '1', making the result a quiet NaN.
+ uint16_t result = (sign == 0) ? Float16ToRawbits(kFP16PositiveInfinity)
+ : Float16ToRawbits(kFP16NegativeInfinity);
+ result |= mantissa >> (kDoubleMantissaBits - kFloat16MantissaBits);
+ result |= (1 << 9); // Force a quiet NaN;
+ return RawbitsToFloat16(result);
+ }
+
+ case FP_ZERO:
+ return (sign == 0) ? kFP16PositiveZero : kFP16NegativeZero;
+
+ case FP_INFINITE:
+ return (sign == 0) ? kFP16PositiveInfinity : kFP16NegativeInfinity;
+ case FP_NORMAL:
+ case FP_SUBNORMAL: {
+ // Convert double-to-half as the processor would, assuming that FPCR.FZ
+ // (flush-to-zero) is not set.
+
+ // Add the implicit '1' bit to the mantissa.
+ mantissa += (UINT64_C(1) << 52);
+ return FPRoundToFloat16(sign, exponent, mantissa, round_mode);
+ }
+ }
+
+ VIXL_UNREACHABLE();
+ return kFP16PositiveZero;
+}
+
+} // namespace vixl
diff --git a/js/src/jit/arm64/vixl/Utils-vixl.h b/js/src/jit/arm64/vixl/Utils-vixl.h
new file mode 100644
index 0000000000..d1f6a835f8
--- /dev/null
+++ b/js/src/jit/arm64/vixl/Utils-vixl.h
@@ -0,0 +1,1283 @@
+// Copyright 2015, VIXL authors
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+//
+// * Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above copyright notice,
+// this list of conditions and the following disclaimer in the documentation
+// and/or other materials provided with the distribution.
+// * Neither the name of ARM Limited nor the names of its contributors may be
+// used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND
+// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
+// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef VIXL_UTILS_H
+#define VIXL_UTILS_H
+
+#include "mozilla/FloatingPoint.h"
+
+#include <cmath>
+#include <cstring>
+#include <limits>
+#include <vector>
+
+#include "jit/arm64/vixl/CompilerIntrinsics-vixl.h"
+#include "jit/arm64/vixl/Globals-vixl.h"
+
+namespace vixl {
+
+// Macros for compile-time format checking.
+#if GCC_VERSION_OR_NEWER(4, 4, 0)
+#define PRINTF_CHECK(format_index, varargs_index) \
+ __attribute__((format(gnu_printf, format_index, varargs_index)))
+#else
+#define PRINTF_CHECK(format_index, varargs_index)
+#endif
+
+#ifdef __GNUC__
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#elif defined(__clang__)
+#ifdef __has_extension
+#define VIXL_HAS_DEPRECATED_WITH_MSG
+#endif
+#endif
+
+#ifdef VIXL_HAS_DEPRECATED_WITH_MSG
+#define VIXL_DEPRECATED(replaced_by, declarator) \
+ __attribute__((deprecated("Use \"" replaced_by "\" instead"))) declarator
+#else
+#define VIXL_DEPRECATED(replaced_by, declarator) declarator
+#endif
+
+#ifdef VIXL_DEBUG
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_UNREACHABLE()
+#else
+#define VIXL_UNREACHABLE_OR_FALLTHROUGH() VIXL_FALLTHROUGH()
+#endif
+
+template <typename T, size_t n>
+size_t ArrayLength(const T (&)[n]) {
+ return n;
+}
+
+// Check number width.
+// TODO: Refactor these using templates.
+inline bool IsIntN(unsigned n, uint32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ uint32_t limit = UINT32_C(1) << (n - 1);
+ return x < limit;
+}
+inline bool IsIntN(unsigned n, int32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ int32_t limit = INT32_C(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+inline bool IsIntN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ uint64_t limit = UINT64_C(1) << (n - 1);
+ return x < limit;
+}
+inline bool IsIntN(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ int64_t limit = INT64_C(1) << (n - 1);
+ return (-limit <= x) && (x < limit);
+}
+VIXL_DEPRECATED("IsIntN", inline bool is_intn(unsigned n, int64_t x)) {
+ return IsIntN(n, x);
+}
+
+inline bool IsUintN(unsigned n, uint32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int32_t x) {
+ VIXL_ASSERT((0 < n) && (n < 32));
+ // Convert to an unsigned integer to avoid implementation-defined behavior.
+ return !(static_cast<uint32_t>(x) >> n);
+}
+inline bool IsUintN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return !(x >> n);
+}
+inline bool IsUintN(unsigned n, int64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ // Convert to an unsigned integer to avoid implementation-defined behavior.
+ return !(static_cast<uint64_t>(x) >> n);
+}
+VIXL_DEPRECATED("IsUintN", inline bool is_uintn(unsigned n, int64_t x)) {
+ return IsUintN(n, x);
+}
+
+inline uint64_t TruncateToUintN(unsigned n, uint64_t x) {
+ VIXL_ASSERT((0 < n) && (n < 64));
+ return static_cast<uint64_t>(x) & ((UINT64_C(1) << n) - 1);
+}
+VIXL_DEPRECATED("TruncateToUintN",
+ inline uint64_t truncate_to_intn(unsigned n, int64_t x)) {
+ return TruncateToUintN(n, x);
+}
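+
+// For example (illustrative only):
+//   IsUintN(8, 255);              // true: 255 fits in an unsigned byte.
+//   IsIntN(8, INT64_C(255));      // false: a signed byte tops out at 127.
+//   TruncateToUintN(8, 0x1234);   // == 0x34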
+
+// clang-format off
+#define INT_1_TO_32_LIST(V) \
+V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) \
+V(9) V(10) V(11) V(12) V(13) V(14) V(15) V(16) \
+V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24) \
+V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)
+
+#define INT_33_TO_63_LIST(V) \
+V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
+V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) \
+V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56) \
+V(57) V(58) V(59) V(60) V(61) V(62) V(63)
+
+#define INT_1_TO_63_LIST(V) INT_1_TO_32_LIST(V) INT_33_TO_63_LIST(V)
+
+// clang-format on
+
+#define DECLARE_IS_INT_N(N) \
+ inline bool IsInt##N(int64_t x) { return IsIntN(N, x); } \
+ VIXL_DEPRECATED("IsInt" #N, inline bool is_int##N(int64_t x)) { \
+ return IsIntN(N, x); \
+ }
+
+#define DECLARE_IS_UINT_N(N) \
+ inline bool IsUint##N(int64_t x) { return IsUintN(N, x); } \
+ VIXL_DEPRECATED("IsUint" #N, inline bool is_uint##N(int64_t x)) { \
+ return IsUintN(N, x); \
+ }
+
+#define DECLARE_TRUNCATE_TO_UINT_32(N) \
+ inline uint32_t TruncateToUint##N(uint64_t x) { \
+ return static_cast<uint32_t>(TruncateToUintN(N, x)); \
+ } \
+ VIXL_DEPRECATED("TruncateToUint" #N, \
+ inline uint32_t truncate_to_int##N(int64_t x)) { \
+ return TruncateToUint##N(x); \
+ }
+
+INT_1_TO_63_LIST(DECLARE_IS_INT_N)
+INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
+INT_1_TO_32_LIST(DECLARE_TRUNCATE_TO_UINT_32)
+
+#undef DECLARE_IS_INT_N
+#undef DECLARE_IS_UINT_N
+#undef DECLARE_TRUNCATE_TO_UINT_32
+
+// Bit field extraction.
+inline uint64_t ExtractUnsignedBitfield64(int msb, int lsb, uint64_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ if ((msb == 63) && (lsb == 0)) return x;
+ return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
+}
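+
+// For example (illustrative only), extracting bits [15:8] of 0x1234 gives the
+// second byte:
+//   ExtractUnsignedBitfield64(15, 8, UINT64_C(0x1234));   // == 0x12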
+
+
+inline uint32_t ExtractUnsignedBitfield32(int msb, int lsb, uint32_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ return TruncateToUint32(ExtractUnsignedBitfield64(msb, lsb, x));
+}
+
+
+inline int64_t ExtractSignedBitfield64(int msb, int lsb, int64_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ uint64_t temp = ExtractUnsignedBitfield64(msb, lsb, x);
+ // If the highest extracted bit is set, sign extend.
+ if ((temp >> (msb - lsb)) == 1) {
+ temp |= ~UINT64_C(0) << (msb - lsb);
+ }
+ int64_t result;
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
+
+
+inline int32_t ExtractSignedBitfield32(int msb, int lsb, int32_t x) {
+ VIXL_ASSERT((static_cast<size_t>(msb) < sizeof(x) * 8) && (lsb >= 0) &&
+ (msb >= lsb));
+ uint32_t temp = TruncateToUint32(ExtractSignedBitfield64(msb, lsb, x));
+ int32_t result;
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
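+
+// For example (illustrative only), a 4-bit field of all ones sign-extends to
+// -1:
+//   ExtractSignedBitfield64(3, 0, INT64_C(0xf));   // == -1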
+
+
+inline uint64_t RotateRight(uint64_t value,
+ unsigned int rotate,
+ unsigned int width) {
+ VIXL_ASSERT((width > 0) && (width <= 64));
+ uint64_t width_mask = ~UINT64_C(0) >> (64 - width);
+ rotate &= 63;
+ if (rotate > 0) {
+ value &= width_mask;
+ value = (value << (width - rotate)) | (value >> rotate);
+ }
+ return value & width_mask;
+}
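+
+// For example (illustrative only), rotating within a 4-bit field wraps the
+// low bit round to bit 3:
+//   RotateRight(UINT64_C(1), 1, 4);   // == 8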
+
+
+// Wrapper class for passing FP16 values through the assembler.
+// This is purely to aid with type checking/casting.
+class Float16 {
+ public:
+ explicit Float16(double dvalue);
+ Float16() : rawbits_(0x0) {}
+ friend uint16_t Float16ToRawbits(Float16 value);
+ friend Float16 RawbitsToFloat16(uint16_t bits);
+
+ protected:
+ uint16_t rawbits_;
+};
+
+// Floating point representation.
+uint16_t Float16ToRawbits(Float16 value);
+
+
+uint32_t FloatToRawbits(float value);
+VIXL_DEPRECATED("FloatToRawbits",
+ inline uint32_t float_to_rawbits(float value)) {
+ return FloatToRawbits(value);
+}
+
+uint64_t DoubleToRawbits(double value);
+VIXL_DEPRECATED("DoubleToRawbits",
+ inline uint64_t double_to_rawbits(double value)) {
+ return DoubleToRawbits(value);
+}
+
+Float16 RawbitsToFloat16(uint16_t bits);
+
+float RawbitsToFloat(uint32_t bits);
+VIXL_DEPRECATED("RawbitsToFloat",
+ inline float rawbits_to_float(uint32_t bits)) {
+ return RawbitsToFloat(bits);
+}
+
+double RawbitsToDouble(uint64_t bits);
+VIXL_DEPRECATED("RawbitsToDouble",
+ inline double rawbits_to_double(uint64_t bits)) {
+ return RawbitsToDouble(bits);
+}
+
+namespace internal {
+
+// Internal simulation class used solely by the simulator to
+// provide an abstraction layer for any half-precision arithmetic.
+class SimFloat16 : public Float16 {
+ public:
+ // TODO: We should investigate making this constructor explicit.
+ // This is currently difficult to do due to a number of templated
+ // functions in the simulator which rely on returning double values.
+ SimFloat16(double dvalue) : Float16(dvalue) {} // NOLINT(runtime/explicit)
+ SimFloat16(Float16 f) { // NOLINT(runtime/explicit)
+ this->rawbits_ = Float16ToRawbits(f);
+ }
+ SimFloat16() : Float16() {}
+ SimFloat16 operator-() const;
+ SimFloat16 operator+(SimFloat16 rhs) const;
+ SimFloat16 operator-(SimFloat16 rhs) const;
+ SimFloat16 operator*(SimFloat16 rhs) const;
+ SimFloat16 operator/(SimFloat16 rhs) const;
+ bool operator<(SimFloat16 rhs) const;
+ bool operator>(SimFloat16 rhs) const;
+ bool operator==(SimFloat16 rhs) const;
+ bool operator!=(SimFloat16 rhs) const;
+  // This is necessary for conversions performed in (macro asm) Fmov.
+ bool operator==(double rhs) const;
+ operator double() const;
+};
+} // namespace internal
+
+uint32_t Float16Sign(internal::SimFloat16 value);
+
+uint32_t Float16Exp(internal::SimFloat16 value);
+
+uint32_t Float16Mantissa(internal::SimFloat16 value);
+
+uint32_t FloatSign(float value);
+VIXL_DEPRECATED("FloatSign", inline uint32_t float_sign(float value)) {
+ return FloatSign(value);
+}
+
+uint32_t FloatExp(float value);
+VIXL_DEPRECATED("FloatExp", inline uint32_t float_exp(float value)) {
+ return FloatExp(value);
+}
+
+uint32_t FloatMantissa(float value);
+VIXL_DEPRECATED("FloatMantissa", inline uint32_t float_mantissa(float value)) {
+ return FloatMantissa(value);
+}
+
+uint32_t DoubleSign(double value);
+VIXL_DEPRECATED("DoubleSign", inline uint32_t double_sign(double value)) {
+ return DoubleSign(value);
+}
+
+uint32_t DoubleExp(double value);
+VIXL_DEPRECATED("DoubleExp", inline uint32_t double_exp(double value)) {
+ return DoubleExp(value);
+}
+
+uint64_t DoubleMantissa(double value);
+VIXL_DEPRECATED("DoubleMantissa",
+ inline uint64_t double_mantissa(double value)) {
+ return DoubleMantissa(value);
+}
+
+internal::SimFloat16 Float16Pack(uint16_t sign,
+ uint16_t exp,
+ uint16_t mantissa);
+
+float FloatPack(uint32_t sign, uint32_t exp, uint32_t mantissa);
+VIXL_DEPRECATED("FloatPack",
+ inline float float_pack(uint32_t sign,
+ uint32_t exp,
+ uint32_t mantissa)) {
+ return FloatPack(sign, exp, mantissa);
+}
+
+double DoublePack(uint64_t sign, uint64_t exp, uint64_t mantissa);
+VIXL_DEPRECATED("DoublePack",
+ inline double double_pack(uint32_t sign,
+ uint32_t exp,
+ uint64_t mantissa)) {
+ return DoublePack(sign, exp, mantissa);
+}
+
+// An fpclassify() function for 16-bit half-precision floats.
+int Float16Classify(Float16 value);
+VIXL_DEPRECATED("Float16Classify", inline int float16classify(uint16_t value)) {
+ return Float16Classify(RawbitsToFloat16(value));
+}
+
+bool IsZero(Float16 value);
+
+inline bool IsNaN(float value) { return std::isnan(value); }
+
+inline bool IsNaN(double value) { return std::isnan(value); }
+
+inline bool IsNaN(Float16 value) { return Float16Classify(value) == FP_NAN; }
+
+inline bool IsInf(float value) { return std::isinf(value); }
+
+inline bool IsInf(double value) { return std::isinf(value); }
+
+inline bool IsInf(Float16 value) {
+ return Float16Classify(value) == FP_INFINITE;
+}
+
+
+// NaN tests.
+inline bool IsSignallingNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ uint64_t raw = DoubleToRawbits(num);
+ if (IsNaN(num) && ((raw & kFP64QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ uint32_t raw = FloatToRawbits(num);
+ if (IsNaN(num) && ((raw & kFP32QuietNaNMask) == 0)) {
+ return true;
+ }
+ return false;
+}
+
+
+inline bool IsSignallingNaN(Float16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ return IsNaN(num) && ((Float16ToRawbits(num) & kFP16QuietNaNMask) == 0);
+}
+
+
+template <typename T>
+inline bool IsQuietNaN(T num) {
+ return IsNaN(num) && !IsSignallingNaN(num);
+}
+
+
+// Convert the NaN in 'num' to a quiet NaN.
+inline double ToQuietNaN(double num) {
+ const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
+ VIXL_ASSERT(IsNaN(num));
+ return RawbitsToDouble(DoubleToRawbits(num) | kFP64QuietNaNMask);
+}
+
+
+inline float ToQuietNaN(float num) {
+ const uint32_t kFP32QuietNaNMask = 0x00400000;
+ VIXL_ASSERT(IsNaN(num));
+ return RawbitsToFloat(FloatToRawbits(num) | kFP32QuietNaNMask);
+}
+
+
+inline internal::SimFloat16 ToQuietNaN(internal::SimFloat16 num) {
+ const uint16_t kFP16QuietNaNMask = 0x0200;
+ VIXL_ASSERT(IsNaN(num));
+ return internal::SimFloat16(
+ RawbitsToFloat16(Float16ToRawbits(num) | kFP16QuietNaNMask));
+}
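+
+// For example (illustrative only), quieting a double signalling NaN sets the
+// top mantissa bit and leaves the rest of the payload intact:
+//   DoubleToRawbits(ToQuietNaN(RawbitsToDouble(UINT64_C(0x7ff0000000000001))));
+//   // == 0x7ff8000000000001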
+
+
+// Fused multiply-add.
+inline double FusedMultiplyAdd(double op1, double op2, double a) {
+ return fma(op1, op2, a);
+}
+
+
+inline float FusedMultiplyAdd(float op1, float op2, float a) {
+ return fmaf(op1, op2, a);
+}
+
+
+inline uint64_t LowestSetBit(uint64_t value) { return value & -value; }
+
+
+template <typename T>
+inline int HighestSetBitPosition(T value) {
+ VIXL_ASSERT(value != 0);
+ return (sizeof(value) * 8 - 1) - CountLeadingZeros(value);
+}
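+
+// For example (illustrative only), for 0x50 (0b1010000):
+//   LowestSetBit(UINT64_C(0x50));            // == 0x10
+//   HighestSetBitPosition(UINT64_C(0x50));   // == 6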
+
+
+template <typename V>
+inline int WhichPowerOf2(V value) {
+ VIXL_ASSERT(IsPowerOf2(value));
+ return CountTrailingZeros(value);
+}
+
+
+unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
+
+
+int BitCount(uint64_t value);
+
+
+template <typename T>
+T ReverseBits(T value) {
+ VIXL_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+ (sizeof(value) == 4) || (sizeof(value) == 8));
+ T result = 0;
+ for (unsigned i = 0; i < (sizeof(value) * 8); i++) {
+ result = (result << 1) | (value & 1);
+ value >>= 1;
+ }
+ return result;
+}
+
+
+template <typename T>
+inline T SignExtend(T val, int bitSize) {
+ VIXL_ASSERT(bitSize > 0);
+ T mask = (T(2) << (bitSize - 1)) - T(1);
+ val &= mask;
+ T sign_bits = -((val >> (bitSize - 1)) << bitSize);
+ val |= sign_bits;
+ return val;
+}
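+
+// For example (illustrative only), sign-extending an 8-bit value whose top
+// bit is set produces -1:
+//   SignExtend(int32_t{0xff}, 8);   // == -1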
+
+
+template <typename T>
+T ReverseBytes(T value, int block_bytes_log2) {
+ VIXL_ASSERT((sizeof(value) == 4) || (sizeof(value) == 8));
+ VIXL_ASSERT((1U << block_bytes_log2) <= sizeof(value));
+ // Split the 64-bit value into an 8-bit array, where b[0] is the least
+ // significant byte, and b[7] is the most significant.
+ uint8_t bytes[8];
+ uint64_t mask = UINT64_C(0xff00000000000000);
+ for (int i = 7; i >= 0; i--) {
+ bytes[i] = (static_cast<uint64_t>(value) & mask) >> (i * 8);
+ mask >>= 8;
+ }
+
+ // Permutation tables for REV instructions.
+ // permute_table[0] is used by REV16_x, REV16_w
+ // permute_table[1] is used by REV32_x, REV_w
+ // permute_table[2] is used by REV_x
+ VIXL_ASSERT((0 < block_bytes_log2) && (block_bytes_log2 < 4));
+ static const uint8_t permute_table[3][8] = {{6, 7, 4, 5, 2, 3, 0, 1},
+ {4, 5, 6, 7, 0, 1, 2, 3},
+ {0, 1, 2, 3, 4, 5, 6, 7}};
+ uint64_t temp = 0;
+ for (int i = 0; i < 8; i++) {
+ temp <<= 8;
+ temp |= bytes[permute_table[block_bytes_log2 - 1][i]];
+ }
+
+ T result;
+ VIXL_STATIC_ASSERT(sizeof(result) <= sizeof(temp));
+ memcpy(&result, &temp, sizeof(result));
+ return result;
+}
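+
+// For example (illustrative only), a block size of 8 bytes (block_bytes_log2
+// of 3) reverses the whole doubleword, as REV_x would:
+//   ReverseBytes(UINT64_C(0x0123456789abcdef), 3);   // == 0xefcdab8967452301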
+
+template <unsigned MULTIPLE, typename T>
+inline bool IsMultiple(T value) {
+ VIXL_ASSERT(IsPowerOf2(MULTIPLE));
+ return (value & (MULTIPLE - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsMultiple(T value, unsigned multiple) {
+ VIXL_ASSERT(IsPowerOf2(multiple));
+ return (value & (multiple - 1)) == 0;
+}
+
+template <typename T>
+inline bool IsAligned(T pointer, int alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ return (pointer & (alignment - 1)) == 0;
+}
+
+// Pointer alignment
+// TODO: rename/refactor to make it specific to instructions.
+template <unsigned ALIGN, typename T>
+inline bool IsAligned(T pointer) {
+ VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t)); // NOLINT(runtime/sizeof)
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+ return IsAligned((intptr_t)(pointer), ALIGN);
+}
+
+template <typename T>
+bool IsWordAligned(T pointer) {
+ return IsAligned<4>(pointer);
+}
+
+// Increment a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignUp(T pointer,
+ typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+ (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t mask = alignment - 1;
+ T result = (T)((pointer_raw + mask) & ~mask);
+ VIXL_ASSERT(result >= pointer);
+
+ return result;
+}
+
+// Decrement a pointer until it has the specified alignment. The alignment must
+// be a power of two.
+template <class T>
+T AlignDown(T pointer,
+ typename Unsigned<sizeof(T) * kBitsPerByte>::type alignment) {
+ VIXL_ASSERT(IsPowerOf2(alignment));
+ // Use C-style casts to get static_cast behaviour for integral types (T), and
+ // reinterpret_cast behaviour for other types.
+
+ typename Unsigned<sizeof(T)* kBitsPerByte>::type pointer_raw =
+ (typename Unsigned<sizeof(T) * kBitsPerByte>::type)pointer;
+ VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));
+
+ size_t mask = alignment - 1;
+ return (T)(pointer_raw & ~mask);
+}
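+
+// For example (illustrative only), with an 8-byte alignment:
+//   AlignUp(uintptr_t{13}, 8);     // == 16
+//   AlignDown(uintptr_t{13}, 8);   // == 8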
+
+
+template <typename T>
+inline T ExtractBit(T value, unsigned bit) {
+ return (value >> bit) & T(1);
+}
+
+template <typename Ts, typename Td>
+inline Td ExtractBits(Ts value, int least_significant_bit, Td mask) {
+ return Td((value >> least_significant_bit) & Ts(mask));
+}
+
+template <typename Ts, typename Td>
+inline void AssignBit(Td& dst, // NOLINT(runtime/references)
+ int bit,
+ Ts value) {
+ VIXL_ASSERT((value == Ts(0)) || (value == Ts(1)));
+ VIXL_ASSERT(bit >= 0);
+ VIXL_ASSERT(bit < static_cast<int>(sizeof(Td) * 8));
+ Td mask(1);
+ dst &= ~(mask << bit);
+ dst |= Td(value) << bit;
+}
+
+template <typename Td, typename Ts>
+inline void AssignBits(Td& dst, // NOLINT(runtime/references)
+ int least_significant_bit,
+ Ts mask,
+ Ts value) {
+ VIXL_ASSERT(least_significant_bit >= 0);
+ VIXL_ASSERT(least_significant_bit < static_cast<int>(sizeof(Td) * 8));
+ VIXL_ASSERT(((Td(mask) << least_significant_bit) >> least_significant_bit) ==
+ Td(mask));
+ VIXL_ASSERT((value & mask) == value);
+ dst &= ~(Td(mask) << least_significant_bit);
+ dst |= Td(value) << least_significant_bit;
+}
+
+class VFP {
+ public:
+ static uint32_t FP32ToImm8(float imm) {
+ // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bit7: a000.0000
+ uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+ }
+ static uint32_t FP64ToImm8(double imm) {
+ // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bit7: a000.0000
+ uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
+ // bit6: 0b00.0000
+ uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
+ // bit5_to_0: 00cd.efgh
+ uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
+
+ return static_cast<uint32_t>(bit7 | bit6 | bit5_to_0);
+ }
+ static float Imm8ToFP32(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Single: aBbb.bbbc.defg.h000.0000.0000.0000.0000 (32 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint32_t bit7 = (bits >> 7) & 0x1;
+ uint32_t bit6 = (bits >> 6) & 0x1;
+ uint32_t bit5_to_0 = bits & 0x3f;
+ uint32_t result = (bit7 << 31) | ((32 - bit6) << 25) | (bit5_to_0 << 19);
+
+ return RawbitsToFloat(result);
+ }
+ static double Imm8ToFP64(uint32_t imm8) {
+ // Imm8: abcdefgh (8 bits)
+ // Double: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000 (64 bits)
+ // where B is b ^ 1
+ uint32_t bits = imm8;
+ uint64_t bit7 = (bits >> 7) & 0x1;
+ uint64_t bit6 = (bits >> 6) & 0x1;
+ uint64_t bit5_to_0 = bits & 0x3f;
+ uint64_t result = (bit7 << 63) | ((256 - bit6) << 54) | (bit5_to_0 << 48);
+ return RawbitsToDouble(result);
+ }
+ static bool IsImmFP32(float imm) {
+ // Valid values will have the form:
+ // aBbb.bbbc.defg.h000.0000.0000.0000.0000
+ uint32_t bits = FloatToRawbits(imm);
+ // bits[19..0] are cleared.
+ if ((bits & 0x7ffff) != 0) {
+ return false;
+ }
+
+
+ // bits[29..25] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 16) & 0x3e00;
+ if (b_pattern != 0 && b_pattern != 0x3e00) {
+ return false;
+ }
+ // bit[30] and bit[29] are opposite.
+ if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
+ return false;
+ }
+ return true;
+ }
+ static bool IsImmFP64(double imm) {
+ // Valid values will have the form:
+ // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
+ // 0000.0000.0000.0000.0000.0000.0000.0000
+ uint64_t bits = DoubleToRawbits(imm);
+ // bits[47..0] are cleared.
+ if ((bits & 0x0000ffffffffffff) != 0) {
+ return false;
+ }
+ // bits[61..54] are all set or all cleared.
+ uint32_t b_pattern = (bits >> 48) & 0x3fc0;
+ if ((b_pattern != 0) && (b_pattern != 0x3fc0)) {
+ return false;
+ }
+ // bit[62] and bit[61] are opposite.
+ if (((bits ^ (bits << 1)) & (UINT64_C(1) << 62)) == 0) {
+ return false;
+ }
+ return true;
+ }
+};
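+
+// For example (illustrative only), 0.5f is encodable as an FMOV immediate and
+// round-trips through the 8-bit encoding:
+//   VFP::IsImmFP32(0.5f);      // true
+//   VFP::FP32ToImm8(0.5f);     // == 0x60
+//   VFP::Imm8ToFP32(0x60);     // == 0.5f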
+
+class BitField {
+  // ForEachBitHelper is a functor that must provide
+  //   bool ForEachBitHelper::execute(ElementType id) const
+  // and return whether the iteration should continue (true) or stop (false).
+  // check_set selects whether the set bits (true) or the clear bits (false)
+  // are visited.
+ template <typename ForEachBitHelper, bool check_set>
+ bool ForEachBit(const ForEachBitHelper& helper) {
+ for (int i = 0; static_cast<size_t>(i) < bitfield_.size(); i++) {
+ if (bitfield_[i] == check_set)
+ if (!helper.execute(i)) return false;
+ }
+ return true;
+ }
+
+ public:
+ explicit BitField(unsigned size) : bitfield_(size, 0) {}
+
+ void Set(int i) {
+ VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+ bitfield_[i] = true;
+ }
+
+ void Unset(int i) {
+ VIXL_ASSERT((i >= 0) && (static_cast<size_t>(i) < bitfield_.size()));
+    bitfield_[i] = false;
+ }
+
+ bool IsSet(int i) const { return bitfield_[i]; }
+
+  // For each bit not set in the bitfield, call the functor's execute()
+  // method.
+  // ForEachBitNotSetHelper::execute returns true if the iteration through
+  // the bits can continue, otherwise it will stop.
+  // struct ForEachBitNotSetHelper {
+  //   bool execute(int /*id*/) { return false; }
+  // };
+ template <typename ForEachBitNotSetHelper>
+ bool ForEachBitNotSet(const ForEachBitNotSetHelper& helper) {
+ return ForEachBit<ForEachBitNotSetHelper, false>(helper);
+ }
+
+  // For each bit set in the bitfield, call the functor's execute() method.
+ template <typename ForEachBitSetHelper>
+ bool ForEachBitSet(const ForEachBitSetHelper& helper) {
+ return ForEachBit<ForEachBitSetHelper, true>(helper);
+ }
+
+ private:
+ std::vector<bool> bitfield_;
+};
+
+namespace internal {
+
+typedef int64_t Int64;
+class Uint64;
+class Uint128;
+
+class Uint32 {
+ uint32_t data_;
+
+ public:
+ // Unlike uint32_t, Uint32 has a default constructor.
+ Uint32() { data_ = 0; }
+ explicit Uint32(uint32_t data) : data_(data) {}
+ inline explicit Uint32(Uint64 data);
+ uint32_t Get() const { return data_; }
+ template <int N>
+ int32_t GetSigned() const {
+ return ExtractSignedBitfield32(N - 1, 0, data_);
+ }
+ int32_t GetSigned() const { return data_; }
+ Uint32 operator~() const { return Uint32(~data_); }
+ Uint32 operator-() const { return Uint32(-data_); }
+ bool operator==(Uint32 value) const { return data_ == value.data_; }
+ bool operator!=(Uint32 value) const { return data_ != value.data_; }
+ bool operator>(Uint32 value) const { return data_ > value.data_; }
+ Uint32 operator+(Uint32 value) const { return Uint32(data_ + value.data_); }
+ Uint32 operator-(Uint32 value) const { return Uint32(data_ - value.data_); }
+ Uint32 operator&(Uint32 value) const { return Uint32(data_ & value.data_); }
+ Uint32 operator&=(Uint32 value) {
+ data_ &= value.data_;
+ return *this;
+ }
+ Uint32 operator^(Uint32 value) const { return Uint32(data_ ^ value.data_); }
+ Uint32 operator^=(Uint32 value) {
+ data_ ^= value.data_;
+ return *this;
+ }
+ Uint32 operator|(Uint32 value) const { return Uint32(data_ | value.data_); }
+ Uint32 operator|=(Uint32 value) {
+ data_ |= value.data_;
+ return *this;
+ }
+ // Unlike uint32_t, the shift functions can accept negative shift and
+ // return 0 when the shift is too big.
+ Uint32 operator>>(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ << tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ >> tmp);
+ }
+ Uint32 operator<<(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ >> tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 32) return Uint32(0);
+ return Uint32(data_ << tmp);
+ }
+};
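+
+// For example (illustrative only), unlike the raw integer operators, the
+// Uint32 shifts tolerate negative and out-of-range shift amounts:
+//   (Uint32(1) >> -4).Get();   // == 16 (a negative right shift shifts left)
+//   (Uint32(1) << 40).Get();   // == 0 (over-wide shifts saturate to zero)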
+
+class Uint64 {
+ uint64_t data_;
+
+ public:
+ // Unlike uint64_t, Uint64 has a default constructor.
+ Uint64() { data_ = 0; }
+ explicit Uint64(uint64_t data) : data_(data) {}
+ explicit Uint64(Uint32 data) : data_(data.Get()) {}
+ inline explicit Uint64(Uint128 data);
+ uint64_t Get() const { return data_; }
+ int64_t GetSigned(int N) const {
+ return ExtractSignedBitfield64(N - 1, 0, data_);
+ }
+ int64_t GetSigned() const { return data_; }
+ Uint32 ToUint32() const {
+ VIXL_ASSERT((data_ >> 32) == 0);
+ return Uint32(static_cast<uint32_t>(data_));
+ }
+ Uint32 GetHigh32() const { return Uint32(data_ >> 32); }
+ Uint32 GetLow32() const { return Uint32(data_ & 0xffffffff); }
+ Uint64 operator~() const { return Uint64(~data_); }
+ Uint64 operator-() const { return Uint64(-data_); }
+ bool operator==(Uint64 value) const { return data_ == value.data_; }
+ bool operator!=(Uint64 value) const { return data_ != value.data_; }
+ Uint64 operator+(Uint64 value) const { return Uint64(data_ + value.data_); }
+ Uint64 operator-(Uint64 value) const { return Uint64(data_ - value.data_); }
+ Uint64 operator&(Uint64 value) const { return Uint64(data_ & value.data_); }
+ Uint64 operator&=(Uint64 value) {
+ data_ &= value.data_;
+ return *this;
+ }
+ Uint64 operator^(Uint64 value) const { return Uint64(data_ ^ value.data_); }
+ Uint64 operator^=(Uint64 value) {
+ data_ ^= value.data_;
+ return *this;
+ }
+ Uint64 operator|(Uint64 value) const { return Uint64(data_ | value.data_); }
+ Uint64 operator|=(Uint64 value) {
+ data_ |= value.data_;
+ return *this;
+ }
+ // Unlike uint64_t, the shift functions can accept negative shift and
+ // return 0 when the shift is too big.
+ Uint64 operator>>(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ << tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ >> tmp);
+ }
+ Uint64 operator<<(int shift) const {
+ if (shift == 0) return *this;
+ if (shift < 0) {
+ int tmp = -shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ >> tmp);
+ }
+ int tmp = shift;
+ if (tmp >= 64) return Uint64(0);
+ return Uint64(data_ << tmp);
+ }
+};
+
+class Uint128 {
+ uint64_t data_high_;
+ uint64_t data_low_;
+
+ public:
+ Uint128() : data_high_(0), data_low_(0) {}
+ explicit Uint128(uint64_t data_low) : data_high_(0), data_low_(data_low) {}
+ explicit Uint128(Uint64 data_low)
+ : data_high_(0), data_low_(data_low.Get()) {}
+ Uint128(uint64_t data_high, uint64_t data_low)
+ : data_high_(data_high), data_low_(data_low) {}
+ Uint64 ToUint64() const {
+ VIXL_ASSERT(data_high_ == 0);
+ return Uint64(data_low_);
+ }
+ Uint64 GetHigh64() const { return Uint64(data_high_); }
+ Uint64 GetLow64() const { return Uint64(data_low_); }
+ Uint128 operator~() const { return Uint128(~data_high_, ~data_low_); }
+ bool operator==(Uint128 value) const {
+ return (data_high_ == value.data_high_) && (data_low_ == value.data_low_);
+ }
+ Uint128 operator&(Uint128 value) const {
+ return Uint128(data_high_ & value.data_high_, data_low_ & value.data_low_);
+ }
+ Uint128 operator&=(Uint128 value) {
+ data_high_ &= value.data_high_;
+ data_low_ &= value.data_low_;
+ return *this;
+ }
+ Uint128 operator|=(Uint128 value) {
+ data_high_ |= value.data_high_;
+ data_low_ |= value.data_low_;
+ return *this;
+ }
+ Uint128 operator>>(int shift) const {
+ VIXL_ASSERT((shift >= 0) && (shift < 128));
+ if (shift == 0) return *this;
+ if (shift >= 64) {
+ return Uint128(0, data_high_ >> (shift - 64));
+ }
+ uint64_t tmp = (data_high_ << (64 - shift)) | (data_low_ >> shift);
+ return Uint128(data_high_ >> shift, tmp);
+ }
+ Uint128 operator<<(int shift) const {
+ VIXL_ASSERT((shift >= 0) && (shift < 128));
+ if (shift == 0) return *this;
+ if (shift >= 64) {
+ return Uint128(data_low_ << (shift - 64), 0);
+ }
+ uint64_t tmp = (data_high_ << shift) | (data_low_ >> (64 - shift));
+ return Uint128(tmp, data_low_ << shift);
+ }
+};
+
+Uint32::Uint32(Uint64 data) : data_(data.ToUint32().Get()) {}
+Uint64::Uint64(Uint128 data) : data_(data.ToUint64().Get()) {}
+
+Int64 BitCount(Uint32 value);
+
+} // namespace internal
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+extern const Float16 kFP16DefaultNaN;
+
+// Floating-point infinity values.
+extern const Float16 kFP16PositiveInfinity;
+extern const Float16 kFP16NegativeInfinity;
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// Floating-point zero values.
+extern const Float16 kFP16PositiveZero;
+extern const Float16 kFP16NegativeZero;
+
+// AArch64 floating-point specifics. These match IEEE-754.
+const unsigned kDoubleMantissaBits = 52;
+const unsigned kDoubleExponentBits = 11;
+const unsigned kFloatMantissaBits = 23;
+const unsigned kFloatExponentBits = 8;
+const unsigned kFloat16MantissaBits = 10;
+const unsigned kFloat16ExponentBits = 5;
+
+enum FPRounding {
+ // The first four values are encodable directly by FPCR<RMode>.
+ FPTieEven = 0x0,
+ FPPositiveInfinity = 0x1,
+ FPNegativeInfinity = 0x2,
+ FPZero = 0x3,
+
+ // The final rounding modes are only available when explicitly specified by
+  // the instruction (such as with fcvta). They cannot be set in FPCR.
+ FPTieAway,
+ FPRoundOdd
+};
+
+enum UseDefaultNaN { kUseDefaultNaN, kIgnoreDefaultNaN };
+
+// Assemble the specified IEEE-754 components into the target type and apply
+// appropriate rounding.
+// sign: 0 = positive, 1 = negative
+// exponent: Unbiased IEEE-754 exponent.
+// mantissa: The mantissa of the input. The top bit (which is not encoded for
+// normal IEEE-754 values) must not be omitted. This bit has the
+// value 'pow(2, exponent)'.
+//
+// The input value is assumed to be a normalized value. That is, the input may
+// not be infinity or NaN. If the source value is subnormal, it must be
+// normalized before calling this function such that the highest set bit in the
+// mantissa has the value 'pow(2, exponent)'.
+//
+// Callers should use FPRoundToFloat or FPRoundToDouble directly, rather than
+// calling a templated FPRound.
+template <class T, int ebits, int mbits>
+T FPRound(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ VIXL_ASSERT((sign == 0) || (sign == 1));
+
+ // Only FPTieEven and FPRoundOdd rounding modes are implemented.
+ VIXL_ASSERT((round_mode == FPTieEven) || (round_mode == FPRoundOdd));
+
+ // Rounding can promote subnormals to normals, and normals to infinities. For
+ // example, a double with exponent 127 (FLT_MAX_EXP) would appear to be
+ // encodable as a float, but rounding based on the low-order mantissa bits
+ // could make it overflow. With ties-to-even rounding, this value would become
+ // an infinity.
+
+ // ---- Rounding Method ----
+ //
+ // The exponent is irrelevant in the rounding operation, so we treat the
+ // lowest-order bit that will fit into the result ('onebit') as having
+ // the value '1'. Similarly, the highest-order bit that won't fit into
+ // the result ('halfbit') has the value '0.5'. The 'point' sits between
+ // 'onebit' and 'halfbit':
+ //
+  //                These bits fit into the result.
+  //               |-----------------------------|
+  //  mantissa = 0bxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+  //                                             ||
+  //                                       onebit  halfbit
+ //
+ // For subnormal outputs, the range of representable bits is smaller and
+ // the position of onebit and halfbit depends on the exponent of the
+ // input, but the method is otherwise similar.
+ //
+  //    onebit(frac)
+  //    |
+  //    | halfbit(frac)     halfbit(adjusted)
+  //    | |                 |
+  //    | |                 |
+ // 0b00.0 (exact) -> 0b00.0 (exact) -> 0b00
+ // 0b00.0... -> 0b00.0... -> 0b00
+ // 0b00.1 (exact) -> 0b00.0111..111 -> 0b00
+ // 0b00.1... -> 0b00.1... -> 0b01
+ // 0b01.0 (exact) -> 0b01.0 (exact) -> 0b01
+ // 0b01.0... -> 0b01.0... -> 0b01
+ // 0b01.1 (exact) -> 0b01.1 (exact) -> 0b10
+ // 0b01.1... -> 0b01.1... -> 0b10
+ // 0b10.0 (exact) -> 0b10.0 (exact) -> 0b10
+ // 0b10.0... -> 0b10.0... -> 0b10
+ // 0b10.1 (exact) -> 0b10.0111..111 -> 0b10
+ // 0b10.1... -> 0b10.1... -> 0b11
+ // 0b11.0 (exact) -> 0b11.0 (exact) -> 0b11
+  // ...
+  //
+  // adjusted = frac - (halfbit(mantissa) & ~onebit(frac));
+ //
+ // mantissa = (mantissa >> shift) + halfbit(adjusted);
+
+ static const int mantissa_offset = 0;
+ static const int exponent_offset = mantissa_offset + mbits;
+ static const int sign_offset = exponent_offset + ebits;
+ VIXL_ASSERT(sign_offset == (sizeof(T) * 8 - 1));
+
+ // Bail out early for zero inputs.
+ if (mantissa == 0) {
+ return static_cast<T>(sign << sign_offset);
+ }
+
+ // If all bits in the exponent are set, the value is infinite or NaN.
+ // This is true for all binary IEEE-754 formats.
+ static const int infinite_exponent = (1 << ebits) - 1;
+ static const int max_normal_exponent = infinite_exponent - 1;
+
+ // Apply the exponent bias to encode it for the result. Doing this early makes
+ // it easy to detect values that will be infinite or subnormal.
+ exponent += max_normal_exponent >> 1;
+
+ if (exponent > max_normal_exponent) {
+ // Overflow: the input is too large for the result type to represent.
+ if (round_mode == FPTieEven) {
+ // FPTieEven rounding mode handles overflows using infinities.
+ exponent = infinite_exponent;
+ mantissa = 0;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ // FPRoundOdd rounding mode handles overflows using the largest magnitude
+ // normal number.
+ exponent = max_normal_exponent;
+ mantissa = (UINT64_C(1) << exponent_offset) - 1;
+ }
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ (mantissa << mantissa_offset));
+ }
+
+ // Calculate the shift required to move the top mantissa bit to the proper
+ // place in the destination type.
+ const int highest_significant_bit = 63 - CountLeadingZeros(mantissa);
+ int shift = highest_significant_bit - mbits;
+
+ if (exponent <= 0) {
+ // The output will be subnormal (before rounding).
+ // For subnormal outputs, the shift must be adjusted by the exponent. The +1
+ // is necessary because the exponent of a subnormal value (encoded as 0) is
+ // the same as the exponent of the smallest normal value (encoded as 1).
+ shift += -exponent + 1;
+
+ // Handle inputs that would produce a zero output.
+ //
+ // Shifts higher than highest_significant_bit+1 will always produce a zero
+ // result. A shift of exactly highest_significant_bit+1 might produce a
+ // non-zero result after rounding.
+ if (shift > (highest_significant_bit + 1)) {
+ if (round_mode == FPTieEven) {
+ // The result will always be +/-0.0.
+ return static_cast<T>(sign << sign_offset);
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ VIXL_ASSERT(mantissa != 0);
+ // For FPRoundOdd, if the mantissa is too small to represent and
+ // non-zero return the next "odd" value.
+ return static_cast<T>((sign << sign_offset) | 1);
+ }
+ }
+
+ // Properly encode the exponent for a subnormal output.
+ exponent = 0;
+ } else {
+ // Clear the topmost mantissa bit, since this is not encoded in IEEE-754
+ // normal values.
+ mantissa &= ~(UINT64_C(1) << highest_significant_bit);
+ }
+
+ // The casts below are only well-defined for unsigned integers.
+ VIXL_STATIC_ASSERT(std::numeric_limits<T>::is_integer);
+ VIXL_STATIC_ASSERT(!std::numeric_limits<T>::is_signed);
+
+ if (shift > 0) {
+ if (round_mode == FPTieEven) {
+ // We have to shift the mantissa to the right. Some precision is lost, so
+ // we need to apply rounding.
+ uint64_t onebit_mantissa = (mantissa >> (shift)) & 1;
+ uint64_t halfbit_mantissa = (mantissa >> (shift - 1)) & 1;
+ uint64_t adjustment = (halfbit_mantissa & ~onebit_mantissa);
+ uint64_t adjusted = mantissa - adjustment;
+ T halfbit_adjusted = (adjusted >> (shift - 1)) & 1;
+
+ T result =
+ static_cast<T>((sign << sign_offset) | (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+
+ // A very large mantissa can overflow during rounding. If this happens,
+ // the exponent should be incremented and the mantissa set to 1.0
+ // (encoded as 0). Applying halfbit_adjusted after assembling the float
+ // has the nice side-effect that this case is handled for free.
+ //
+ // This also handles cases where a very large finite value overflows to
+ // infinity, or where a very large subnormal value overflows to become
+ // normal.
+ return result + halfbit_adjusted;
+ } else {
+ VIXL_ASSERT(round_mode == FPRoundOdd);
+ // If any bits at position halfbit or below are set, onebit (ie. the
+ // bottom bit of the resulting mantissa) must be set.
+ uint64_t fractional_bits = mantissa & ((UINT64_C(1) << shift) - 1);
+ if (fractional_bits != 0) {
+ mantissa |= UINT64_C(1) << shift;
+ }
+
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa >> shift) << mantissa_offset));
+ }
+ } else {
+ // We have to shift the mantissa to the left (or not at all). The input
+ // mantissa is exactly representable in the output mantissa, so apply no
+ // rounding correction.
+ return static_cast<T>((sign << sign_offset) |
+ (exponent << exponent_offset) |
+ ((mantissa << -shift) << mantissa_offset));
+ }
+}
+
+
+// See FPRound for a description of this function.
+inline double FPRoundToDouble(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ uint64_t bits =
+ FPRound<uint64_t, kDoubleExponentBits, kDoubleMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return RawbitsToDouble(bits);
+}
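+
+
+// For example (illustrative only), packing sign 0, unbiased exponent 0 and a
+// mantissa whose top (implicit) bit carries the value 2^0 reproduces 1.0:
+//   FPRoundToDouble(0, 0, UINT64_C(1) << 52, FPTieEven);   // == 1.0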
+
+
+// See FPRound for a description of this function.
+inline Float16 FPRoundToFloat16(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ return RawbitsToFloat16(
+ FPRound<uint16_t,
+ kFloat16ExponentBits,
+ kFloat16MantissaBits>(sign, exponent, mantissa, round_mode));
+}
+
+
+// See FPRound for a description of this function.
+static inline float FPRoundToFloat(int64_t sign,
+ int64_t exponent,
+ uint64_t mantissa,
+ FPRounding round_mode) {
+ uint32_t bits =
+ FPRound<uint32_t, kFloatExponentBits, kFloatMantissaBits>(sign,
+ exponent,
+ mantissa,
+ round_mode);
+ return RawbitsToFloat(bits);
+}
+
+
+float FPToFloat(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+float FPToFloat(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+
+double FPToDouble(Float16 value, UseDefaultNaN DN, bool* exception = NULL);
+double FPToDouble(float value, UseDefaultNaN DN, bool* exception = NULL);
+
+Float16 FPToFloat16(float value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+
+Float16 FPToFloat16(double value,
+ FPRounding round_mode,
+ UseDefaultNaN DN,
+ bool* exception = NULL);
+} // namespace vixl
+
+#endif // VIXL_UTILS_H