author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/arm/probes/kprobes
parent    Initial commit. (diff)
Adding upstream version 4.19.249. (upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/arm/probes/kprobes')
-rw-r--r--  arch/arm/probes/kprobes/Makefile             13
-rw-r--r--  arch/arm/probes/kprobes/actions-arm.c       344
-rw-r--r--  arch/arm/probes/kprobes/actions-common.c    171
-rw-r--r--  arch/arm/probes/kprobes/actions-thumb.c     670
-rw-r--r--  arch/arm/probes/kprobes/checkers-arm.c      192
-rw-r--r--  arch/arm/probes/kprobes/checkers-common.c   101
-rw-r--r--  arch/arm/probes/kprobes/checkers-thumb.c    110
-rw-r--r--  arch/arm/probes/kprobes/checkers.h           55
-rw-r--r--  arch/arm/probes/kprobes/core.c              565
-rw-r--r--  arch/arm/probes/kprobes/core.h               58
-rw-r--r--  arch/arm/probes/kprobes/opt-arm.c           372
-rw-r--r--  arch/arm/probes/kprobes/test-arm.c         1366
-rw-r--r--  arch/arm/probes/kprobes/test-core.c        1676
-rw-r--r--  arch/arm/probes/kprobes/test-core.h         458
-rw-r--r--  arch/arm/probes/kprobes/test-thumb.c       1200
15 files changed, 7351 insertions, 0 deletions
diff --git a/arch/arm/probes/kprobes/Makefile b/arch/arm/probes/kprobes/Makefile
new file mode 100644
index 000000000..14db56f49
--- /dev/null
+++ b/arch/arm/probes/kprobes/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_KPROBES) += core.o actions-common.o checkers-common.o
+obj-$(CONFIG_ARM_KPROBES_TEST) += test-kprobes.o
+test-kprobes-objs := test-core.o
+
+ifdef CONFIG_THUMB2_KERNEL
+obj-$(CONFIG_KPROBES) += actions-thumb.o checkers-thumb.o
+test-kprobes-objs += test-thumb.o
+else
+obj-$(CONFIG_KPROBES) += actions-arm.o checkers-arm.o
+obj-$(CONFIG_OPTPROBES) += opt-arm.o
+test-kprobes-objs += test-arm.o
+endif
diff --git a/arch/arm/probes/kprobes/actions-arm.c b/arch/arm/probes/kprobes/actions-arm.c
new file mode 100644
index 000000000..b9056d649
--- /dev/null
+++ b/arch/arm/probes/kprobes/actions-arm.c
@@ -0,0 +1,344 @@
+/*
+ * arch/arm/probes/kprobes/actions-arm.c
+ *
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+/*
+ * We do not have hardware single-stepping on ARM. This
+ * effort is further complicated by the ARM not having a
+ * "next PC" register. Instructions that change the PC
+ * can't be safely single-stepped in a MP environment, so
+ * we have a lot of work to do:
+ *
+ * In the prepare phase:
+ * *) If it is an instruction that does anything
+ * with the CPU mode, we reject it for a kprobe.
+ * (This is out of laziness rather than need. The
+ * instructions could be simulated.)
+ *
+ * *) Otherwise, decode the instruction rewriting its
+ * registers to take fixed, ordered registers and
+ * setting a handler for it to run the instruction.
+ *
+ * In the execution phase by an instruction's handler:
+ *
+ * *) If the PC is written to by the instruction, the
+ * instruction must be fully simulated in software.
+ *
+ * *) Otherwise, a modified form of the instruction is
+ * directly executed. Its handler calls the
+ * instruction in insn[0]. In insn[1] is a
+ * "mov pc, lr" to return.
+ *
+ * Before calling, load up the reordered registers
+ * from the original instruction's registers. If one
+ * of the original input registers is the PC, compute
+ * and adjust the appropriate input register.
+ *
+ * After call completes, copy the output registers to
+ * the original instruction's original registers.
+ *
+ * We don't use a real breakpoint instruction since that
+ * would have the kernel re-enter SVC mode from SVC
+ * mode, losing the link register. Instead we use an
+ * undefined instruction. To simplify processing, the
+ * undefined instruction used for kprobes must be reserved
+ * exclusively for kprobes use.
+ *
+ * TODO: ifdef out some instruction decoding based on architecture.
+ */
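As a standalone illustration of the register-rewriting step described above (the rewriting itself lives in ../decode-arm.c, outside this patch; the helper name and the hand-assembled opcodes below are hypothetical):

    #include <stdio.h>
    #include <stdint.h>

    /* Rewrite an insn of the rd12rn16rm0 form to use the fixed registers
     * r0 (Rd), r2 (Rn), r3 (Rm) that the emulation handlers below expect. */
    static uint32_t rewrite_rd12rn16rm0(uint32_t insn)
    {
        insn &= ~0x000ff00f;    /* clear Rd[15:12], Rn[19:16], Rm[3:0] */
        insn |= 0x00020003;     /* Rd = r0, Rn = r2, Rm = r3 */
        return insn;
    }

    int main(void)
    {
        /* "add r5, r6, r7" (0xe0865007) -> "add r0, r2, r3" in insn[0] */
        printf("%08x\n", rewrite_rd12rn16rm0(0xe0865007)); /* e0820003 */
        return 0;
    }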
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/ptrace.h>
+
+#include "../decode-arm.h"
+#include "core.h"
+#include "checkers.h"
+
+#if __LINUX_ARM_ARCH__ >= 6
+#define BLX(reg) "blx "reg" \n\t"
+#else
+#define BLX(reg) "mov lr, pc \n\t" \
+ "mov pc, "reg" \n\t"
+#endif
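For reference, the pre-v6 fallback works because reading PC in ARM state yields the address of the current instruction plus 8, so "mov lr, pc" produces exactly the address after the following "mov pc". A sketch of the two expansions:

    /*
     * ARMv6+:  BLX("%[fn]")  ->  blx <fn>
     * pre-v6:  BLX("%[fn]")  ->  mov lr, pc    @ lr = this insn + 8,
     *                            mov pc, <fn>  @ i.e. the return site
     */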
+
+static void __kprobes
+emulate_ldrdstrd(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 4;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = regs->uregs[rt];
+ register unsigned long rt2v asm("r1") = regs->uregs[rt+1];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rtv), "=r" (rt2v), "=r" (rnv)
+ : "0" (rtv), "1" (rt2v), "2" (rnv), "r" (rmv),
+ [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rt] = rtv;
+ regs->uregs[rt+1] = rt2v;
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_ldr(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 4;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0");
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rtv), "=r" (rnv)
+ : "1" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rt == 15)
+ load_write_pc(rtv, regs);
+ else
+ regs->uregs[rt] = rtv;
+
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_str(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long rtpc = regs->ARM_pc - 4 + str_pc_offset;
+ unsigned long rnpc = regs->ARM_pc + 4;
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = (rt == 15) ? rtpc
+ : regs->uregs[rt];
+ register unsigned long rnv asm("r2") = (rn == 15) ? rnpc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rnv)
+ : "r" (rtv), "0" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (is_writeback(insn))
+ regs->uregs[rn] = rnv;
+}
+
+static void __kprobes
+emulate_rd12rn16rm0rs8_rwflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 4;
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+ int rs = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+ register unsigned long rmv asm("r3") = (rm == 15) ? pc
+ : regs->uregs[rm];
+ register unsigned long rsv asm("r1") = regs->uregs[rs];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+ "1" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rd == 15)
+ alu_write_pc(rdv, regs);
+ else
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rn16rm0_rwflags_nopc(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rd = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv),
+ "1" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd16rn12rm0rs8_rwflags_nopc(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ int rd = (insn >> 16) & 0xf;
+ int rn = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+ int rs = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r2") = regs->uregs[rd];
+ register unsigned long rnv asm("r0") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ register unsigned long rsv asm("r1") = regs->uregs[rs];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv), "r" (rsv),
+ "1" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+emulate_rd12rm0_noflags_nopc(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rd = (insn >> 12) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r0") = regs->uregs[rd];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ BLX("%[fn]")
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rmv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+emulate_rdlo12rdhi16rn0rm8_rwflags_nopc(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ int rdlo = (insn >> 12) & 0xf;
+ int rdhi = (insn >> 16) & 0xf;
+ int rn = insn & 0xf;
+ int rm = (insn >> 8) & 0xf;
+
+ register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+ register unsigned long rdhiv asm("r2") = regs->uregs[rdhi];
+ register unsigned long rnv asm("r3") = regs->uregs[rn];
+ register unsigned long rmv asm("r1") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ BLX("%[fn]")
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdlov), "=r" (rdhiv), [cpsr] "=r" (cpsr)
+ : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+ "2" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rdlo] = rdlov;
+ regs->uregs[rdhi] = rdhiv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+const union decode_action kprobes_arm_actions[NUM_PROBES_ARM_ACTIONS] = {
+ [PROBES_PRELOAD_IMM] = {.handler = probes_simulate_nop},
+ [PROBES_PRELOAD_REG] = {.handler = probes_simulate_nop},
+ [PROBES_BRANCH_IMM] = {.handler = simulate_blx1},
+ [PROBES_MRS] = {.handler = simulate_mrs},
+ [PROBES_BRANCH_REG] = {.handler = simulate_blx2bx},
+ [PROBES_CLZ] = {.handler = emulate_rd12rm0_noflags_nopc},
+ [PROBES_SATURATING_ARITHMETIC] = {
+ .handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_MUL1] = {.handler = emulate_rdlo12rdhi16rn0rm8_rwflags_nopc},
+ [PROBES_MUL2] = {.handler = emulate_rd16rn12rm0rs8_rwflags_nopc},
+ [PROBES_SWP] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_LDRSTRD] = {.handler = emulate_ldrdstrd},
+ [PROBES_LOAD_EXTRA] = {.handler = emulate_ldr},
+ [PROBES_LOAD] = {.handler = emulate_ldr},
+ [PROBES_STORE_EXTRA] = {.handler = emulate_str},
+ [PROBES_STORE] = {.handler = emulate_str},
+ [PROBES_MOV_IP_SP] = {.handler = simulate_mov_ipsp},
+ [PROBES_DATA_PROCESSING_REG] = {
+ .handler = emulate_rd12rn16rm0rs8_rwflags},
+ [PROBES_DATA_PROCESSING_IMM] = {
+ .handler = emulate_rd12rn16rm0rs8_rwflags},
+ [PROBES_MOV_HALFWORD] = {.handler = emulate_rd12rm0_noflags_nopc},
+ [PROBES_SEV] = {.handler = probes_emulate_none},
+ [PROBES_WFE] = {.handler = probes_simulate_nop},
+ [PROBES_SATURATE] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_REV] = {.handler = emulate_rd12rm0_noflags_nopc},
+ [PROBES_MMI] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_PACK] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_EXTEND] = {.handler = emulate_rd12rm0_noflags_nopc},
+ [PROBES_EXTEND_ADD] = {.handler = emulate_rd12rn16rm0_rwflags_nopc},
+ [PROBES_MUL_ADD_LONG] = {
+ .handler = emulate_rdlo12rdhi16rn0rm8_rwflags_nopc},
+ [PROBES_MUL_ADD] = {.handler = emulate_rd16rn12rm0rs8_rwflags_nopc},
+ [PROBES_BITFIELD] = {.handler = emulate_rd12rm0_noflags_nopc},
+ [PROBES_BRANCH] = {.handler = simulate_bbl},
+ [PROBES_LDMSTM] = {.decoder = kprobe_decode_ldmstm}
+};
+
+const struct decode_checker *kprobes_arm_checkers[] = {arm_stack_checker, arm_regs_checker, NULL};
diff --git a/arch/arm/probes/kprobes/actions-common.c b/arch/arm/probes/kprobes/actions-common.c
new file mode 100644
index 000000000..bd20a71cd
--- /dev/null
+++ b/arch/arm/probes/kprobes/actions-common.c
@@ -0,0 +1,171 @@
+/*
+ * arch/arm/probes/kprobes/actions-common.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes-arm.c which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <asm/opcodes.h>
+
+#include "core.h"
+
+
+static void __kprobes simulate_ldm1stm1(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ int rn = (insn >> 16) & 0xf;
+ int lbit = insn & (1 << 20);
+ int wbit = insn & (1 << 21);
+ int ubit = insn & (1 << 23);
+ int pbit = insn & (1 << 24);
+ long *addr = (long *)regs->uregs[rn];
+ int reg_bit_vector;
+ int reg_count;
+
+ reg_count = 0;
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ reg_bit_vector &= (reg_bit_vector - 1);
+ ++reg_count;
+ }
+
+ if (!ubit)
+ addr -= reg_count;
+ addr += (!pbit == !ubit);
+
+ reg_bit_vector = insn & 0xffff;
+ while (reg_bit_vector) {
+ int reg = __ffs(reg_bit_vector);
+ reg_bit_vector &= (reg_bit_vector - 1);
+ if (lbit)
+ regs->uregs[reg] = *addr++;
+ else
+ *addr++ = regs->uregs[reg];
+ }
+
+ if (wbit) {
+ if (!ubit)
+ addr -= reg_count;
+ addr -= (!pbit == !ubit);
+ regs->uregs[rn] = (long)addr;
+ }
+}
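A user-space sketch of the address arithmetic above, computing the lowest transfer slot (word-indexed for readability) for the four LDM/STM addressing modes; the helper and example values are hypothetical:

    #include <stdio.h>

    /* Mirror of simulate_ldm1stm1()'s setup: descending modes start
     * reg_count words below Rn, and IB/DA are offset by one word. */
    static long first_slot(long rn, int reg_count, int pbit, int ubit)
    {
        long addr = rn;
        if (!ubit)
            addr -= reg_count;       /* decrement: start below Rn */
        addr += (!pbit == !ubit);    /* IB and DA shift up one word */
        return addr;
    }

    int main(void)
    {
        /* Four registers, base register holding word index 100. */
        printf("IA %ld  IB %ld  DA %ld  DB %ld\n",
               first_slot(100, 4, 0, 1),   /* 100 */
               first_slot(100, 4, 1, 1),   /* 101 */
               first_slot(100, 4, 0, 0),   /*  97 */
               first_slot(100, 4, 1, 0));  /*  96 */
        return 0;
    }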
+
+static void __kprobes simulate_stm1_pc(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ unsigned long addr = regs->ARM_pc - 4;
+
+ regs->ARM_pc = (long)addr + str_pc_offset;
+ simulate_ldm1stm1(insn, asi, regs);
+ regs->ARM_pc = (long)addr + 4;
+}
+
+static void __kprobes simulate_ldm1_pc(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ simulate_ldm1stm1(insn, asi, regs);
+ load_write_pc(regs->ARM_pc, regs);
+}
+
+static void __kprobes
+emulate_generic_r0_12_noflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ register void *rregs asm("r1") = regs;
+ register void *rfn asm("lr") = asi->insn_fn;
+
+ __asm__ __volatile__ (
+ "stmdb sp!, {%[regs], r11} \n\t"
+ "ldmia %[regs], {r0-r12} \n\t"
+#if __LINUX_ARM_ARCH__ >= 6
+ "blx %[fn] \n\t"
+#else
+ "str %[fn], [sp, #-4]! \n\t"
+ "adr lr, 1f \n\t"
+ "ldr pc, [sp], #4 \n\t"
+ "1: \n\t"
+#endif
+ "ldr lr, [sp], #4 \n\t" /* lr = regs */
+ "stmia lr, {r0-r12} \n\t"
+ "ldr r11, [sp], #4 \n\t"
+ : [regs] "=r" (rregs), [fn] "=r" (rfn)
+ : "0" (rregs), "1" (rfn)
+ : "r0", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r12", "memory", "cc"
+ );
+}
+
+static void __kprobes
+emulate_generic_r2_14_noflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ emulate_generic_r0_12_noflags(insn, asi,
+ (struct pt_regs *)(regs->uregs+2));
+}
+
+static void __kprobes
+emulate_ldm_r3_15(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ emulate_generic_r0_12_noflags(insn, asi,
+ (struct pt_regs *)(regs->uregs+3));
+ load_write_pc(regs->ARM_pc, regs);
+}
+
+enum probes_insn __kprobes
+kprobe_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ probes_insn_handler_t *handler = 0;
+ unsigned reglist = insn & 0xffff;
+ int is_ldm = insn & 0x100000;
+ int rn = (insn >> 16) & 0xf;
+
+ if (rn <= 12 && (reglist & 0xe000) == 0) {
+ /* Instruction only uses registers in the range R0..R12 */
+ handler = emulate_generic_r0_12_noflags;
+
+ } else if (rn >= 2 && (reglist & 0x8003) == 0) {
+ /* Instruction only uses registers in the range R2..R14 */
+ rn -= 2;
+ reglist >>= 2;
+ handler = emulate_generic_r2_14_noflags;
+
+ } else if (rn >= 3 && (reglist & 0x0007) == 0) {
+ /* Instruction only uses registers in the range R3..R15 */
+ if (is_ldm && (reglist & 0x8000)) {
+ rn -= 3;
+ reglist >>= 3;
+ handler = emulate_ldm_r3_15;
+ }
+ }
+
+ if (handler) {
+ /* We can emulate the instruction in (possibly) modified form */
+ asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+ (rn << 16) | reglist);
+ asi->insn_handler = handler;
+ return INSN_GOOD;
+ }
+
+ /* Fallback to slower simulation... */
+ if (reglist & 0x8000)
+ handler = is_ldm ? simulate_ldm1_pc : simulate_stm1_pc;
+ else
+ handler = simulate_ldm1stm1;
+ asi->insn_handler = handler;
+ return INSN_GOOD_NO_SLOT;
+}
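The R2..R14 case above is the interesting one; a standalone sketch of the rewrite it performs (the opcode is hand-assembled, so treat it as an assumption):

    #include <stdio.h>
    #include <stdint.h>

    /* "ldmia r4, {r5-r14}" only touches R2..R14, so shifting Rn and the
     * register list down by 2 yields a slot insn confined to R0..R12;
     * the emulation then passes a pt_regs pointer offset by two words
     * (regs->uregs+2) so each slot register maps back to the right one. */
    int main(void)
    {
        uint32_t insn = 0xe8947fe0;       /* ldmia r4, {r5-r14} */
        unsigned rn = (insn >> 16) & 0xf; /* 4 */
        unsigned reglist = insn & 0xffff; /* 0x7fe0 */

        rn -= 2;
        reglist >>= 2;
        insn = (insn & 0xfff00000) | (rn << 16) | reglist;
        printf("%08x\n", insn);           /* e8921ff8: ldmia r2, {r3-r12} */
        return 0;
    }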
+
diff --git a/arch/arm/probes/kprobes/actions-thumb.c b/arch/arm/probes/kprobes/actions-thumb.c
new file mode 100644
index 000000000..07cfd9bef
--- /dev/null
+++ b/arch/arm/probes/kprobes/actions-thumb.c
@@ -0,0 +1,670 @@
+/*
+ * arch/arm/probes/kprobes/actions-thumb.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/kprobes.h>
+
+#include "../decode-thumb.h"
+#include "core.h"
+#include "checkers.h"
+
+/* These emulation encodings are functionally equivalent... */
+#define t32_emulate_rd8rn16rm0ra12_noflags \
+ t32_emulate_rdlo12rdhi8rn16rm0_noflags
+
+/* t32 thumb actions */
+
+static void __kprobes
+t32_simulate_table_branch(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ unsigned long rnv = (rn == 15) ? pc : regs->uregs[rn];
+ unsigned long rmv = regs->uregs[rm];
+ unsigned int halfwords;
+
+ if (insn & 0x10) /* TBH */
+ halfwords = ((u16 *)rnv)[rmv];
+ else /* TBB */
+ halfwords = ((u8 *)rnv)[rmv];
+
+ regs->ARM_pc = pc + 2 * halfwords;
+}
+
+static void __kprobes
+t32_simulate_mrs(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rd = (insn >> 8) & 0xf;
+ unsigned long mask = 0xf8ff03df; /* Mask out execution state */
+ regs->uregs[rd] = regs->ARM_cpsr & mask;
+}
+
+static void __kprobes
+t32_simulate_cond_branch(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc;
+
+ long offset = insn & 0x7ff; /* imm11 */
+ offset += (insn & 0x003f0000) >> 5; /* imm6 */
+ offset += (insn & 0x00002000) << 4; /* J1 */
+ offset += (insn & 0x00000800) << 7; /* J2 */
+ offset -= (insn & 0x04000000) >> 7; /* Apply sign bit */
+
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static enum probes_insn __kprobes
+t32_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ int cc = (insn >> 22) & 0xf;
+ asi->insn_check_cc = probes_condition_checks[cc];
+ asi->insn_handler = t32_simulate_cond_branch;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t32_simulate_branch(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc;
+
+ long offset = insn & 0x7ff; /* imm11 */
+ offset += (insn & 0x03ff0000) >> 5; /* imm10 */
+ offset += (insn & 0x00002000) << 9; /* J1 */
+ offset += (insn & 0x00000800) << 10; /* J2 */
+ if (insn & 0x04000000)
+ offset -= 0x00800000; /* Apply sign bit */
+ else
+ offset ^= 0x00600000; /* Invert J1 and J2 */
+
+ if (insn & (1 << 14)) {
+ /* BL or BLX */
+ regs->ARM_lr = regs->ARM_pc | 1;
+ if (!(insn & (1 << 12))) {
+ /* BLX so switch to ARM mode */
+ regs->ARM_cpsr &= ~PSR_T_BIT;
+ pc &= ~3;
+ }
+ }
+
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static void __kprobes
+t32_simulate_ldr_literal(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long addr = regs->ARM_pc & ~3;
+ int rt = (insn >> 12) & 0xf;
+ unsigned long rtv;
+
+ long offset = insn & 0xfff;
+ if (insn & 0x00800000)
+ addr += offset;
+ else
+ addr -= offset;
+
+ if (insn & 0x00400000) {
+ /* LDR */
+ rtv = *(unsigned long *)addr;
+ if (rt == 15) {
+ bx_write_pc(rtv, regs);
+ return;
+ }
+ } else if (insn & 0x00200000) {
+ /* LDRH */
+ if (insn & 0x01000000)
+ rtv = *(s16 *)addr;
+ else
+ rtv = *(u16 *)addr;
+ } else {
+ /* LDRB */
+ if (insn & 0x01000000)
+ rtv = *(s8 *)addr;
+ else
+ rtv = *(u8 *)addr;
+ }
+
+ regs->uregs[rt] = rtv;
+}
+
+static enum probes_insn __kprobes
+t32_decode_ldmstm(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ enum probes_insn ret = kprobe_decode_ldmstm(insn, asi, d);
+
+ /* Fixup modified instruction to have halfwords in correct order...*/
+ insn = __mem_to_opcode_arm(asi->insn[0]);
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
+
+ return ret;
+}
+
+static void __kprobes
+t32_emulate_ldrdstrd(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc & ~3;
+ int rt1 = (insn >> 12) & 0xf;
+ int rt2 = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+
+ register unsigned long rt1v asm("r0") = regs->uregs[rt1];
+ register unsigned long rt2v asm("r1") = regs->uregs[rt2];
+ register unsigned long rnv asm("r2") = (rn == 15) ? pc
+ : regs->uregs[rn];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rt1v), "=r" (rt2v), "=r" (rnv)
+ : "0" (rt1v), "1" (rt2v), "2" (rnv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rn != 15)
+ regs->uregs[rn] = rnv; /* Writeback base register */
+ regs->uregs[rt1] = rt1v;
+ regs->uregs[rt2] = rt2v;
+}
+
+static void __kprobes
+t32_emulate_ldrstr(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rt = (insn >> 12) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rtv asm("r0") = regs->uregs[rt];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rtv), "=r" (rnv)
+ : "0" (rtv), "1" (rnv), "r" (rmv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rn] = rnv; /* Writeback base register */
+ if (rt == 15) /* Can't be true for a STR as they aren't allowed */
+ bx_write_pc(rtv, regs);
+ else
+ regs->uregs[rt] = rtv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16rm0_rwflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rd = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "blx %[fn] \n\t"
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdv), [cpsr] "=r" (cpsr)
+ : "0" (rdv), "r" (rnv), "r" (rmv),
+ "1" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static void __kprobes
+t32_emulate_rd8pc16_noflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc;
+ int rd = (insn >> 8) & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = pc & ~3;
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rd8rn16_noflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rd = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+
+ register unsigned long rdv asm("r1") = regs->uregs[rd];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdv)
+ : "0" (rdv), "r" (rnv), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rd] = rdv;
+}
+
+static void __kprobes
+t32_emulate_rdlo12rdhi8rn16rm0_noflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ struct pt_regs *regs)
+{
+ int rdlo = (insn >> 12) & 0xf;
+ int rdhi = (insn >> 8) & 0xf;
+ int rn = (insn >> 16) & 0xf;
+ int rm = insn & 0xf;
+
+ register unsigned long rdlov asm("r0") = regs->uregs[rdlo];
+ register unsigned long rdhiv asm("r1") = regs->uregs[rdhi];
+ register unsigned long rnv asm("r2") = regs->uregs[rn];
+ register unsigned long rmv asm("r3") = regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "blx %[fn]"
+ : "=r" (rdlov), "=r" (rdhiv)
+ : "0" (rdlov), "1" (rdhiv), "r" (rnv), "r" (rmv),
+ [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ regs->uregs[rdlo] = rdlov;
+ regs->uregs[rdhi] = rdhiv;
+}
+
+/* t16 thumb actions */
+
+static void __kprobes
+t16_simulate_bxblx(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 2;
+ int rm = (insn >> 3) & 0xf;
+ unsigned long rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+ if (insn & (1 << 7)) /* BLX ? */
+ regs->ARM_lr = regs->ARM_pc | 1;
+
+ bx_write_pc(rmv, regs);
+}
+
+static void __kprobes
+t16_simulate_ldr_literal(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long *base = (unsigned long *)((regs->ARM_pc + 2) & ~3);
+ long index = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ regs->uregs[rt] = base[index];
+}
+
+static void __kprobes
+t16_simulate_ldrstr_sp_relative(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long* base = (unsigned long *)regs->ARM_sp;
+ long index = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ if (insn & 0x800) /* LDR */
+ regs->uregs[rt] = base[index];
+ else /* STR */
+ base[index] = regs->uregs[rt];
+}
+
+static void __kprobes
+t16_simulate_reladr(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long base = (insn & 0x800) ? regs->ARM_sp
+ : ((regs->ARM_pc + 2) & ~3);
+ long offset = insn & 0xff;
+ int rt = (insn >> 8) & 0x7;
+ regs->uregs[rt] = base + offset * 4;
+}
+
+static void __kprobes
+t16_simulate_add_sp_imm(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ long imm = insn & 0x7f;
+ if (insn & 0x80) /* SUB */
+ regs->ARM_sp -= imm * 4;
+ else /* ADD */
+ regs->ARM_sp += imm * 4;
+}
+
+static void __kprobes
+t16_simulate_cbz(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ int rn = insn & 0x7;
+ probes_opcode_t nonzero = regs->uregs[rn] ? insn : ~insn;
+ if (nonzero & 0x800) {
+ long i = insn & 0x200;
+ long imm5 = insn & 0xf8;
+ unsigned long pc = regs->ARM_pc + 2;
+ regs->ARM_pc = pc + (i >> 3) + (imm5 >> 2);
+ }
+}
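The ternary above is a compact trick: bit 11 of the encoding distinguishes CBNZ from CBZ, and inverting the opcode when the register is zero lets a single bit test decide both cases. A standalone sketch (encodings hand-assembled, helper name hypothetical):

    #include <stdio.h>

    static int takes_branch(unsigned insn, unsigned long reg)
    {
        unsigned nonzero = reg ? insn : ~insn;
        return !!(nonzero & 0x800);   /* bit 11: 1 = CBNZ, 0 = CBZ */
    }

    int main(void)
    {
        unsigned cbz = 0xb100, cbnz = 0xb900;  /* cbz r0 / cbnz r0 */
        printf("%d %d %d %d\n",
               takes_branch(cbz, 0),   /* 1 */
               takes_branch(cbz, 5),   /* 0 */
               takes_branch(cbnz, 0),  /* 0 */
               takes_branch(cbnz, 5)); /* 1 */
        return 0;
    }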
+
+static void __kprobes
+t16_simulate_it(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ /*
+ * The 8 IT state bits are split into two parts in CPSR:
+ * ITSTATE<1:0> are in CPSR<26:25>
+ * ITSTATE<7:2> are in CPSR<15:10>
+ * The new IT state is in the lower byte of insn.
+ */
+ unsigned long cpsr = regs->ARM_cpsr;
+ cpsr &= ~PSR_IT_MASK;
+ cpsr |= (insn & 0xfc) << 8;
+ cpsr |= (insn & 0x03) << 25;
+ regs->ARM_cpsr = cpsr;
+}
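A standalone sketch of the bit shuffling above; the PSR_IT_MASK constant mirrors the layout the comment describes and is restated here as an assumption:

    #include <stdio.h>

    #define PSR_IT_MASK 0x0600fc00UL  /* CPSR<26:25> plus CPSR<15:10> */

    int main(void)
    {
        unsigned long cpsr = 0;
        unsigned int insn = 0xbf08;   /* "IT EQ": new ITSTATE = 0x08 */

        cpsr &= ~PSR_IT_MASK;
        cpsr |= (insn & 0xfc) << 8;   /* ITSTATE<7:2> -> CPSR<15:10> */
        cpsr |= (insn & 0x03) << 25;  /* ITSTATE<1:0> -> CPSR<26:25> */
        printf("%08lx\n", cpsr);      /* 00000800 */
        return 0;
    }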
+
+static void __kprobes
+t16_singlestep_it(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ regs->ARM_pc += 2;
+ t16_simulate_it(insn, asi, regs);
+}
+
+static enum probes_insn __kprobes
+t16_decode_it(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ asi->insn_singlestep = t16_singlestep_it;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_cond_branch(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 2;
+ long offset = insn & 0x7f;
+ offset -= insn & 0x80; /* Apply sign bit */
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static enum probes_insn __kprobes
+t16_decode_cond_branch(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ int cc = (insn >> 8) & 0xf;
+ asi->insn_check_cc = probes_condition_checks[cc];
+ asi->insn_handler = t16_simulate_cond_branch;
+ return INSN_GOOD_NO_SLOT;
+}
+
+static void __kprobes
+t16_simulate_branch(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 2;
+ long offset = insn & 0x3ff;
+ offset -= insn & 0x400; /* Apply sign bit */
+ regs->ARM_pc = pc + (offset * 2);
+}
+
+static unsigned long __kprobes
+t16_emulate_loregs(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long oldcpsr = regs->ARM_cpsr;
+ unsigned long newcpsr;
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[oldcpsr] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "mrs %[newcpsr], cpsr \n\t"
+ : [newcpsr] "=r" (newcpsr)
+ : [oldcpsr] "r" (oldcpsr), [regs] "r" (regs),
+ [fn] "r" (asi->insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "lr", "memory", "cc"
+ );
+
+ return (oldcpsr & ~APSR_MASK) | (newcpsr & APSR_MASK);
+}
+
+static void __kprobes
+t16_emulate_loregs_rwflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ regs->ARM_cpsr = t16_emulate_loregs(insn, asi, regs);
+}
+
+static void __kprobes
+t16_emulate_loregs_noitrwflags(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long cpsr = t16_emulate_loregs(insn, asi, regs);
+ if (!in_it_block(cpsr))
+ regs->ARM_cpsr = cpsr;
+}
+
+static void __kprobes
+t16_emulate_hiregs(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ unsigned long pc = regs->ARM_pc + 2;
+ int rdn = (insn & 0x7) | ((insn & 0x80) >> 4);
+ int rm = (insn >> 3) & 0xf;
+
+ register unsigned long rdnv asm("r1");
+ register unsigned long rmv asm("r0");
+ unsigned long cpsr = regs->ARM_cpsr;
+
+ rdnv = (rdn == 15) ? pc : regs->uregs[rdn];
+ rmv = (rm == 15) ? pc : regs->uregs[rm];
+
+ __asm__ __volatile__ (
+ "msr cpsr_fs, %[cpsr] \n\t"
+ "blx %[fn] \n\t"
+ "mrs %[cpsr], cpsr \n\t"
+ : "=r" (rdnv), [cpsr] "=r" (cpsr)
+ : "0" (rdnv), "r" (rmv), "1" (cpsr), [fn] "r" (asi->insn_fn)
+ : "lr", "memory", "cc"
+ );
+
+ if (rdn == 15)
+ rdnv &= ~1;
+
+ regs->uregs[rdn] = rdnv;
+ regs->ARM_cpsr = (regs->ARM_cpsr & ~APSR_MASK) | (cpsr & APSR_MASK);
+}
+
+static enum probes_insn __kprobes
+t16_decode_hiregs(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ insn &= ~0x00ff;
+ insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
+ asi->insn_handler = t16_emulate_hiregs;
+ return INSN_GOOD;
+}
+
+static void __kprobes
+t16_emulate_push(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldr r8, [%[regs], #14*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ :
+ : [regs] "r" (regs), [fn] "r" (asi->insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9",
+ "lr", "memory", "cc"
+ );
+}
+
+static enum probes_insn __kprobes
+t16_decode_push(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ /*
+ * To simulate a PUSH we use a Thumb-2 "STMDB R9!, {registers}"
+ * and call it with R9=SP and LR in the register list represented
+ * by R8.
+ */
+ /* 1st half STMDB R9!,{} */
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+ /* 2nd half (register list) */
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
+ asi->insn_handler = t16_emulate_push;
+ return INSN_GOOD;
+}
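Worked through for "push {r4, lr}" (T16 encoding 0xb510, hand-assembled here): the slot becomes "stmdb r9!, {r4, r8}", and t16_emulate_push() preloads R9 with SP and R8 with LR so the right values land on the stack:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t insn = 0xb510;                       /* push {r4, lr} */
        uint16_t slot[2] = { 0xe929,                  /* stmdb r9!     */
                             (uint16_t)(insn & 0x1ff) }; /* {r4, r8}   */
        printf("%04x %04x\n", slot[0], slot[1]);      /* e929 0110 */
        return 0;
    }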
+
+static void __kprobes
+t16_emulate_pop_nopc(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ :
+ : [regs] "r" (regs), [fn] "r" (asi->insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+ "lr", "memory", "cc"
+ );
+}
+
+static void __kprobes
+t16_emulate_pop_pc(probes_opcode_t insn,
+ struct arch_probes_insn *asi, struct pt_regs *regs)
+{
+ register unsigned long pc asm("r8");
+
+ __asm__ __volatile__ (
+ "ldr r9, [%[regs], #13*4] \n\t"
+ "ldmia %[regs], {r0-r7} \n\t"
+ "blx %[fn] \n\t"
+ "stmia %[regs], {r0-r7} \n\t"
+ "str r9, [%[regs], #13*4] \n\t"
+ : "=r" (pc)
+ : [regs] "r" (regs), [fn] "r" (asi->insn_fn)
+ : "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r9",
+ "lr", "memory", "cc"
+ );
+
+ bx_write_pc(pc, regs);
+}
+
+static enum probes_insn __kprobes
+t16_decode_pop(probes_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *d)
+{
+ /*
+ * To simulate a POP we use a Thumb-2 "LDMIA R9!, {registers}"
+ * and call it with R9=SP and PC in the register list represented
+ * by R8.
+ */
+ /* 1st half LDMIA R9!,{} */
+ ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+ /* 2nd half (register list) */
+ ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
+ asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
+ : t16_emulate_pop_nopc;
+ return INSN_GOOD;
+}
+
+const union decode_action kprobes_t16_actions[NUM_PROBES_T16_ACTIONS] = {
+ [PROBES_T16_ADD_SP] = {.handler = t16_simulate_add_sp_imm},
+ [PROBES_T16_CBZ] = {.handler = t16_simulate_cbz},
+ [PROBES_T16_SIGN_EXTEND] = {.handler = t16_emulate_loregs_rwflags},
+ [PROBES_T16_PUSH] = {.decoder = t16_decode_push},
+ [PROBES_T16_POP] = {.decoder = t16_decode_pop},
+ [PROBES_T16_SEV] = {.handler = probes_emulate_none},
+ [PROBES_T16_WFE] = {.handler = probes_simulate_nop},
+ [PROBES_T16_IT] = {.decoder = t16_decode_it},
+ [PROBES_T16_CMP] = {.handler = t16_emulate_loregs_rwflags},
+ [PROBES_T16_ADDSUB] = {.handler = t16_emulate_loregs_noitrwflags},
+ [PROBES_T16_LOGICAL] = {.handler = t16_emulate_loregs_noitrwflags},
+ [PROBES_T16_LDR_LIT] = {.handler = t16_simulate_ldr_literal},
+ [PROBES_T16_BLX] = {.handler = t16_simulate_bxblx},
+ [PROBES_T16_HIREGOPS] = {.decoder = t16_decode_hiregs},
+ [PROBES_T16_LDRHSTRH] = {.handler = t16_emulate_loregs_rwflags},
+ [PROBES_T16_LDRSTR] = {.handler = t16_simulate_ldrstr_sp_relative},
+ [PROBES_T16_ADR] = {.handler = t16_simulate_reladr},
+ [PROBES_T16_LDMSTM] = {.handler = t16_emulate_loregs_rwflags},
+ [PROBES_T16_BRANCH_COND] = {.decoder = t16_decode_cond_branch},
+ [PROBES_T16_BRANCH] = {.handler = t16_simulate_branch},
+};
+
+const union decode_action kprobes_t32_actions[NUM_PROBES_T32_ACTIONS] = {
+ [PROBES_T32_LDMSTM] = {.decoder = t32_decode_ldmstm},
+ [PROBES_T32_LDRDSTRD] = {.handler = t32_emulate_ldrdstrd},
+ [PROBES_T32_TABLE_BRANCH] = {.handler = t32_simulate_table_branch},
+ [PROBES_T32_TST] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_MOV] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_ADDSUB] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_LOGICAL] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_CMP] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_ADDWSUBW_PC] = {.handler = t32_emulate_rd8pc16_noflags,},
+ [PROBES_T32_ADDWSUBW] = {.handler = t32_emulate_rd8rn16_noflags},
+ [PROBES_T32_MOVW] = {.handler = t32_emulate_rd8rn16_noflags},
+ [PROBES_T32_SAT] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_BITFIELD] = {.handler = t32_emulate_rd8rn16_noflags},
+ [PROBES_T32_SEV] = {.handler = probes_emulate_none},
+ [PROBES_T32_WFE] = {.handler = probes_simulate_nop},
+ [PROBES_T32_MRS] = {.handler = t32_simulate_mrs},
+ [PROBES_T32_BRANCH_COND] = {.decoder = t32_decode_cond_branch},
+ [PROBES_T32_BRANCH] = {.handler = t32_simulate_branch},
+ [PROBES_T32_PLDI] = {.handler = probes_simulate_nop},
+ [PROBES_T32_LDR_LIT] = {.handler = t32_simulate_ldr_literal},
+ [PROBES_T32_LDRSTR] = {.handler = t32_emulate_ldrstr},
+ [PROBES_T32_SIGN_EXTEND] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_MEDIA] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_REVERSE] = {.handler = t32_emulate_rd8rn16_noflags},
+ [PROBES_T32_MUL_ADD] = {.handler = t32_emulate_rd8rn16rm0_rwflags},
+ [PROBES_T32_MUL_ADD2] = {.handler = t32_emulate_rd8rn16rm0ra12_noflags},
+ [PROBES_T32_MUL_ADD_LONG] = {
+ .handler = t32_emulate_rdlo12rdhi8rn16rm0_noflags},
+};
+
+const struct decode_checker *kprobes_t32_checkers[] = {t32_stack_checker, NULL};
+const struct decode_checker *kprobes_t16_checkers[] = {t16_stack_checker, NULL};
diff --git a/arch/arm/probes/kprobes/checkers-arm.c b/arch/arm/probes/kprobes/checkers-arm.c
new file mode 100644
index 000000000..7b9817333
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-arm.c
@@ -0,0 +1,192 @@
+/*
+ * arch/arm/probes/kprobes/checkers-arm.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "checkers.h"
+
+static enum probes_insn __kprobes arm_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /*
+ * PROBES_LDRSTRD, PROBES_LDMSTM, PROBES_STORE,
+ * PROBES_STORE_EXTRA may get here. Simply mark all normal
+ * insns as STACK_USE_NONE.
+ */
+ static const union decode_item table[] = {
+ /*
+ * 'STR{,D,B,H} Rt, [Rn, Rm]' should be marked as UNKNOWN
+ * if Rn or Rm is SP.
+ *                           x
+ * STR (register) cccc 011x x0x0 xxxx xxxx xxxx xxxx xxxx
+ * STRB (register) cccc 011x x1x0 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_OR (0x0e10000f, 0x0600000d),
+ DECODE_OR (0x0e1f0000, 0x060d0000),
+
+ /*
+ *                                                x
+ * STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
+ * STRH (register) cccc 000x x0x0 xxxx xxxx xxxx 1011 xxxx
+ */
+ DECODE_OR (0x0e5000bf, 0x000000bd),
+ DECODE_CUSTOM (0x0e5f00b0, 0x000d00b0, STACK_USE_UNKNOWN),
+
+ /*
+ * For PROBES_LDMSTM, only 'stmdx sp, [...]' needs to be examined.
+ *
+ * Bit B/A (bit 24) encodes arithmetic operation order. 1 means
+ * before, 0 means after.
+ * Bit I/D (bit 23) encodes arithmetic operation. 1 means
+ * increment, 0 means decrement.
+ *
+ * So:
+ *                         B I
+ *                         / /
+ *                        A D    | Rn |
+ * STMDX SP, [...] cccc 100x 00x0 xxxx xxxx xxxx xxxx xxxx
+ */
+ DECODE_CUSTOM (0x0edf0000, 0x080d0000, STACK_USE_STMDX),
+
+ /* P U W | Rn | Rt | imm12 |*/
+ /* STR (immediate) cccc 010x x0x0 1101 xxxx xxxx xxxx xxxx */
+ /* STRB (immediate) cccc 010x x1x0 1101 xxxx xxxx xxxx xxxx */
+ /* P U W | Rn | Rt |imm4| |imm4|*/
+ /* STRD (immediate) cccc 000x x1x0 1101 xxxx xxxx 1111 xxxx */
+ /* STRH (immediate) cccc 000x x1x0 1101 xxxx xxxx 1011 xxxx */
+ /*
+ * index = (P == '1'); add = (U == '1').
+ * Above insns with:
+ * index == 0 (str{,d,h} rx, [sp], #+/-imm) or
+ * add == 1 (str{,d,h} rx, [sp, #+<imm>])
+ * should be STACK_USE_NONE.
+ * Only str{,b,d,h} rx,[sp,#-n] (P == 1 and U == 0) are
+ * required to be examined.
+ */
+ /* STR{,B} Rt,[SP,#-n] cccc 0101 0xx0 1101 xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0x0f9f0000, 0x050d0000, STACK_USE_FIXED_XXX),
+
+ /* STR{D,H} Rt,[SP,#-n] cccc 0001 01x0 1101 xxxx xxxx 1x11 xxxx */
+ DECODE_CUSTOM (0x0fdf00b0, 0x014d00b0, STACK_USE_FIXED_X0X),
+
+ /* fall through */
+ DECODE_CUSTOM (0, 0, STACK_USE_NONE),
+ DECODE_END
+ };
+
+ return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
+}
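A table row above fires when (insn & mask) == value, with DECODE_OR chaining a row into the next row's action. A standalone sketch of the STACK_USE_FIXED_XXX match (the opcode is hand-assembled):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t insn = 0xe50d0008;  /* str r0, [sp, #-8] */
        uint32_t mask = 0x0f9f0000, value = 0x050d0000;

        if ((insn & mask) == value)  /* P=1, U=0, Rn=SP all match */
            printf("STACK_USE_FIXED_XXX, imm12 = %u\n",
                   (unsigned)(insn & 0xfff));  /* 8 bytes below SP */
        return 0;
    }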
+
+const struct decode_checker arm_stack_checker[NUM_PROBES_ARM_ACTIONS] = {
+ [PROBES_LDRSTRD] = {.checker = arm_check_stack},
+ [PROBES_STORE_EXTRA] = {.checker = arm_check_stack},
+ [PROBES_STORE] = {.checker = arm_check_stack},
+ [PROBES_LDMSTM] = {.checker = arm_check_stack},
+};
+
+static enum probes_insn __kprobes arm_check_regs_nouse(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->register_usage_flags = 0;
+ return INSN_GOOD;
+}
+
+static enum probes_insn arm_check_regs_normal(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ u32 regs = h->type_regs.bits >> DECODE_TYPE_BITS;
+ int i;
+
+ asi->register_usage_flags = 0;
+ for (i = 0; i < 5; regs >>= 4, insn >>= 4, i++)
+ if (regs & 0xf)
+ asi->register_usage_flags |= 1 << (insn & 0xf);
+
+ return INSN_GOOD;
+}
+
+
+static enum probes_insn arm_check_regs_ldmstm(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0xffff;
+ unsigned int rn = (insn >> 16) & 0xf;
+ asi->register_usage_flags = reglist | (1 << rn);
+ return INSN_GOOD;
+}
+
+static enum probes_insn arm_check_regs_mov_ip_sp(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /* Instruction is 'mov ip, sp' i.e. 'mov r12, r13' */
+ asi->register_usage_flags = (1 << 12) | (1 << 13);
+ return INSN_GOOD;
+}
+
+/*
+ * | Rn |Rt/d| | Rm |
+ * LDRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1101 xxxx
+ * STRD (register) cccc 000x x0x0 xxxx xxxx xxxx 1111 xxxx
+ * | Rn |Rt/d| |imm4L|
+ * LDRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1101 xxxx
+ * STRD (immediate) cccc 000x x1x0 xxxx xxxx xxxx 1111 xxxx
+ *
+ * Such instructions access Rt/d and the next register as well, so,
+ * unlike the others, a specific checker is required to handle this
+ * extra implicit register usage.
+ */
+static enum probes_insn arm_check_regs_ldrdstrd(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int rdt = (insn >> 12) & 0xf;
+ arm_check_regs_normal(insn, asi, h);
+ asi->register_usage_flags |= 1 << (rdt + 1);
+ return INSN_GOOD;
+}
+
+
+const struct decode_checker arm_regs_checker[NUM_PROBES_ARM_ACTIONS] = {
+ [PROBES_MRS] = {.checker = arm_check_regs_normal},
+ [PROBES_SATURATING_ARITHMETIC] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL1] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL2] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL_ADD_LONG] = {.checker = arm_check_regs_normal},
+ [PROBES_MUL_ADD] = {.checker = arm_check_regs_normal},
+ [PROBES_LOAD] = {.checker = arm_check_regs_normal},
+ [PROBES_LOAD_EXTRA] = {.checker = arm_check_regs_normal},
+ [PROBES_STORE] = {.checker = arm_check_regs_normal},
+ [PROBES_STORE_EXTRA] = {.checker = arm_check_regs_normal},
+ [PROBES_DATA_PROCESSING_REG] = {.checker = arm_check_regs_normal},
+ [PROBES_DATA_PROCESSING_IMM] = {.checker = arm_check_regs_normal},
+ [PROBES_SEV] = {.checker = arm_check_regs_nouse},
+ [PROBES_WFE] = {.checker = arm_check_regs_nouse},
+ [PROBES_SATURATE] = {.checker = arm_check_regs_normal},
+ [PROBES_REV] = {.checker = arm_check_regs_normal},
+ [PROBES_MMI] = {.checker = arm_check_regs_normal},
+ [PROBES_PACK] = {.checker = arm_check_regs_normal},
+ [PROBES_EXTEND] = {.checker = arm_check_regs_normal},
+ [PROBES_EXTEND_ADD] = {.checker = arm_check_regs_normal},
+ [PROBES_BITFIELD] = {.checker = arm_check_regs_normal},
+ [PROBES_LDMSTM] = {.checker = arm_check_regs_ldmstm},
+ [PROBES_MOV_IP_SP] = {.checker = arm_check_regs_mov_ip_sp},
+ [PROBES_LDRSTRD] = {.checker = arm_check_regs_ldrdstrd},
+};
diff --git a/arch/arm/probes/kprobes/checkers-common.c b/arch/arm/probes/kprobes/checkers-common.c
new file mode 100644
index 000000000..971119c29
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-common.c
@@ -0,0 +1,101 @@
+/*
+ * arch/arm/probes/kprobes/checkers-common.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-arm.h"
+#include "checkers.h"
+
+enum probes_insn checker_stack_use_none(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->stack_space = 0;
+ return INSN_GOOD_NO_SLOT;
+}
+
+enum probes_insn checker_stack_use_unknown(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ asi->stack_space = -1;
+ return INSN_GOOD_NO_SLOT;
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+enum probes_insn checker_stack_use_imm_0xx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xff;
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+
+/*
+ * Different from other insn uses imm8, the real addressing offset of
+ * STRD in T32 encoding should be imm8 * 4. See ARMARM description.
+ */
+enum probes_insn checker_stack_use_t32strd(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xff;
+ asi->stack_space = imm << 2;
+ return INSN_GOOD_NO_SLOT;
+}
+#else
+enum probes_insn checker_stack_use_imm_x0x(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = ((insn & 0xf00) >> 4) + (insn & 0xf);
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+#endif
+
+enum probes_insn checker_stack_use_imm_xxx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ int imm = insn & 0xfff;
+ asi->stack_space = imm;
+ return INSN_GOOD_NO_SLOT;
+}
+
+enum probes_insn checker_stack_use_stmdx(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0xffff;
+ int pbit = insn & (1 << 24);
+ asi->stack_space = (hweight32(reglist) - (!pbit ? 1 : 0)) * 4;
+
+ return INSN_GOOD_NO_SLOT;
+}
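Worked examples for the formula above (opcodes hand-assembled, helper name hypothetical): STMDB stores all n words strictly below SP, while STMDA's lowest slot sits one word higher, hence the subtracted word when the P bit is clear:

    #include <stdio.h>

    static int stm_stack_space(unsigned insn)
    {
        unsigned reglist = insn & 0xffff;
        int pbit = insn & (1 << 24);
        int n = 0;

        while (reglist) {             /* open-coded hweight32() */
            reglist &= reglist - 1;
            n++;
        }
        return (n - (!pbit ? 1 : 0)) * 4;
    }

    int main(void)
    {
        printf("%d\n", stm_stack_space(0xe92d4070)); /* stmdb sp!,{r4-r6,lr}: 16 */
        printf("%d\n", stm_stack_space(0xe80d4070)); /* stmda sp, {r4-r6,lr}: 12 */
        return 0;
    }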
+
+const union decode_action stack_check_actions[] = {
+ [STACK_USE_NONE] = {.decoder = checker_stack_use_none},
+ [STACK_USE_UNKNOWN] = {.decoder = checker_stack_use_unknown},
+#ifdef CONFIG_THUMB2_KERNEL
+ [STACK_USE_FIXED_0XX] = {.decoder = checker_stack_use_imm_0xx},
+ [STACK_USE_T32STRD] = {.decoder = checker_stack_use_t32strd},
+#else
+ [STACK_USE_FIXED_X0X] = {.decoder = checker_stack_use_imm_x0x},
+#endif
+ [STACK_USE_FIXED_XXX] = {.decoder = checker_stack_use_imm_xxx},
+ [STACK_USE_STMDX] = {.decoder = checker_stack_use_stmdx},
+};
diff --git a/arch/arm/probes/kprobes/checkers-thumb.c b/arch/arm/probes/kprobes/checkers-thumb.c
new file mode 100644
index 000000000..d608e3b90
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers-thumb.c
@@ -0,0 +1,110 @@
+/*
+ * arch/arm/probes/kprobes/checkers-thumb.c
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include "../decode.h"
+#include "../decode-thumb.h"
+#include "checkers.h"
+
+static enum probes_insn __kprobes t32_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ /*
+ * PROBES_T32_LDMSTM, PROBES_T32_LDRDSTRD and PROBES_T32_LDRSTR
+ * may get here. Simply mark all normal insns as STACK_USE_NONE.
+ */
+ static const union decode_item table[] = {
+
+ /*
+ * First, filter out all ldr insns to make our life easier.
+ * Following load insns may come here:
+ * LDM, LDRD, LDR.
+ * In T32 encoding, bit 20 is enough for distinguishing
+ * load and store. All load insns have this bit set, while
+ * all store insns have this bit clear.
+ */
+ DECODE_CUSTOM (0x00100000, 0x00100000, STACK_USE_NONE),
+
+ /*
+ * Mark all 'STR{,B,H} Rt, [Rn, Rm]' as STACK_USE_UNKNOWN
+ * if Rn or Rm is SP. T32 doesn't encode STRD.
+ */
+ /* xx | Rn | Rt | | Rm |*/
+ /* STR (register) 1111 1000 0100 xxxx xxxx 0000 00xx xxxx */
+ /* STRB (register) 1111 1000 0000 xxxx xxxx 0000 00xx xxxx */
+ /* STRH (register) 1111 1000 0010 xxxx xxxx 0000 00xx xxxx */
+ /* INVALID INSN 1111 1000 0110 xxxx xxxx 0000 00xx xxxx */
+ /* By introducing INVALID INSN, bits 21 and 22 can be ignored. */
+ DECODE_OR (0xff9f0fc0, 0xf80d0000),
+ DECODE_CUSTOM (0xff900fcf, 0xf800000d, STACK_USE_UNKNOWN),
+
+
+ /* xx | Rn | Rt | PUW| imm8 |*/
+ /* STR (imm 8) 1111 1000 0100 1101 xxxx 110x xxxx xxxx */
+ /* STRB (imm 8) 1111 1000 0000 1101 xxxx 110x xxxx xxxx */
+ /* STRH (imm 8) 1111 1000 0010 1101 xxxx 110x xxxx xxxx */
+ /* INVALID INSN 1111 1000 0110 1101 xxxx 110x xxxx xxxx */
+ /* Only consider U == 0 and P == 1: strx rx, [sp, #-<imm>] */
+ DECODE_CUSTOM (0xff9f0e00, 0xf80d0c00, STACK_USE_FIXED_0XX),
+
+ /* For STR{,B,H} (imm 12), offset is always positive, so ignore them. */
+
+ /* P U W | Rn | Rt | Rt2| imm8 |*/
+ /* STRD (immediate) 1110 1001 01x0 1101 xxxx xxxx xxxx xxxx */
+ /*
+ * Only consider U == 0 and P == 1.
+ * Also note that STRD in T32 encoding is special:
+ * imm = ZeroExtend(imm8:'00', 32)
+ */
+ DECODE_CUSTOM (0xffdf0000, 0xe94d0000, STACK_USE_T32STRD),
+
+ /* | Rn | */
+ /* STMDB 1110 1001 00x0 1101 xxxx xxxx xxxx xxxx */
+ DECODE_CUSTOM (0xffdf0000, 0xe90d0000, STACK_USE_STMDX),
+
+ /* fall through */
+ DECODE_CUSTOM (0, 0, STACK_USE_NONE),
+ DECODE_END
+ };
+
+ return probes_decode_insn(insn, asi, table, false, false, stack_check_actions, NULL);
+}
+
+const struct decode_checker t32_stack_checker[NUM_PROBES_T32_ACTIONS] = {
+ [PROBES_T32_LDMSTM] = {.checker = t32_check_stack},
+ [PROBES_T32_LDRDSTRD] = {.checker = t32_check_stack},
+ [PROBES_T32_LDRSTR] = {.checker = t32_check_stack},
+};
+
+/*
+ * See the comment on t16_stack_checker below; this insn must be 'push'.
+ */
+static enum probes_insn __kprobes t16_check_stack(probes_opcode_t insn,
+ struct arch_probes_insn *asi,
+ const struct decode_header *h)
+{
+ unsigned int reglist = insn & 0x1ff;
+ asi->stack_space = hweight32(reglist) * 4;
+ return INSN_GOOD;
+}
+
+/*
+ * T16 encoding is simple: only the 'push' insn can need extra stack space.
+ * Other insns, like str, can only use r0-r7 as Rn.
+ */
+const struct decode_checker t16_stack_checker[NUM_PROBES_T16_ACTIONS] = {
+ [PROBES_T16_PUSH] = {.checker = t16_check_stack},
+};
diff --git a/arch/arm/probes/kprobes/checkers.h b/arch/arm/probes/kprobes/checkers.h
new file mode 100644
index 000000000..cf6c9e74d
--- /dev/null
+++ b/arch/arm/probes/kprobes/checkers.h
@@ -0,0 +1,55 @@
+/*
+ * arch/arm/probes/kprobes/checkers.h
+ *
+ * Copyright (C) 2014 Huawei Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef _ARM_KERNEL_PROBES_CHECKERS_H
+#define _ARM_KERNEL_PROBES_CHECKERS_H
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include "../decode.h"
+
+extern probes_check_t checker_stack_use_none;
+extern probes_check_t checker_stack_use_unknown;
+#ifdef CONFIG_THUMB2_KERNEL
+extern probes_check_t checker_stack_use_imm_0xx;
+#else
+extern probes_check_t checker_stack_use_imm_x0x;
+#endif
+extern probes_check_t checker_stack_use_imm_xxx;
+extern probes_check_t checker_stack_use_stmdx;
+
+enum {
+ STACK_USE_NONE,
+ STACK_USE_UNKNOWN,
+#ifdef CONFIG_THUMB2_KERNEL
+ STACK_USE_FIXED_0XX,
+ STACK_USE_T32STRD,
+#else
+ STACK_USE_FIXED_X0X,
+#endif
+ STACK_USE_FIXED_XXX,
+ STACK_USE_STMDX,
+ NUM_STACK_USE_TYPES
+};
+
+extern const union decode_action stack_check_actions[];
+
+#ifndef CONFIG_THUMB2_KERNEL
+extern const struct decode_checker arm_stack_checker[];
+extern const struct decode_checker arm_regs_checker[];
+#endif
+extern const struct decode_checker t32_stack_checker[];
+extern const struct decode_checker t16_stack_checker[];
+#endif
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
new file mode 100644
index 000000000..62da8e221
--- /dev/null
+++ b/arch/arm/probes/kprobes/core.c
@@ -0,0 +1,565 @@
+/*
+ * arch/arm/kernel/kprobes.c
+ *
+ * Kprobes on ARM
+ *
+ * Abhishek Sagar <sagar.abhishek@gmail.com>
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * Nicolas Pitre <nico@marvell.com>
+ * Copyright (C) 2007 Marvell Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/kprobes.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/stop_machine.h>
+#include <linux/sched/debug.h>
+#include <linux/stringify.h>
+#include <asm/traps.h>
+#include <asm/opcodes.h>
+#include <asm/cacheflush.h>
+#include <linux/percpu.h>
+#include <linux/bug.h>
+#include <asm/patch.h>
+#include <asm/sections.h>
+
+#include "../decode-arm.h"
+#include "../decode-thumb.h"
+#include "core.h"
+
+#define MIN_STACK_SIZE(addr) \
+ min((unsigned long)MAX_STACK_SIZE, \
+ (unsigned long)current_thread_info() + THREAD_START_SP - (addr))
+
+#define flush_insns(addr, size) \
+ flush_icache_range((unsigned long)(addr), \
+ (unsigned long)(addr) + \
+ (size))
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+
+int __kprobes arch_prepare_kprobe(struct kprobe *p)
+{
+ kprobe_opcode_t insn;
+ kprobe_opcode_t tmp_insn[MAX_INSN_SIZE];
+ unsigned long addr = (unsigned long)p->addr;
+ bool thumb;
+ kprobe_decode_insn_t *decode_insn;
+ const union decode_action *actions;
+ int is;
+ const struct decode_checker **checkers;
+
+#ifdef CONFIG_THUMB2_KERNEL
+ thumb = true;
+ addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
+ insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
+ if (is_wide_instruction(insn)) {
+ u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+ insn = __opcode_thumb32_compose(insn, inst2);
+ decode_insn = thumb32_probes_decode_insn;
+ actions = kprobes_t32_actions;
+ checkers = kprobes_t32_checkers;
+ } else {
+ decode_insn = thumb16_probes_decode_insn;
+ actions = kprobes_t16_actions;
+ checkers = kprobes_t16_checkers;
+ }
+#else /* !CONFIG_THUMB2_KERNEL */
+ thumb = false;
+ if (addr & 0x3)
+ return -EINVAL;
+ insn = __mem_to_opcode_arm(*p->addr);
+ decode_insn = arm_probes_decode_insn;
+ actions = kprobes_arm_actions;
+ checkers = kprobes_arm_checkers;
+#endif
+
+ p->opcode = insn;
+ p->ainsn.insn = tmp_insn;
+
+ switch ((*decode_insn)(insn, &p->ainsn, true, actions, checkers)) {
+ case INSN_REJECTED: /* not supported */
+ return -EINVAL;
+
+ case INSN_GOOD: /* instruction uses slot */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ for (is = 0; is < MAX_INSN_SIZE; ++is)
+ p->ainsn.insn[is] = tmp_insn[is];
+ flush_insns(p->ainsn.insn,
+ sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE);
+ p->ainsn.insn_fn = (probes_insn_fn_t *)
+ ((uintptr_t)p->ainsn.insn | thumb);
+ break;
+
+ case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */
+ p->ainsn.insn = NULL;
+ break;
+ }
+
+	/*
+	 * Never instrument instructions like 'str r0, [sp, +/-r1]', whose
+	 * stack use cannot be determined statically. Instructions like
+	 * 'str r0, [sp, #-68]', which store more than MAX_STACK_SIZE below
+	 * SP, must also be prohibited.
+	 * See __und_svc.
+	 */
+ if ((p->ainsn.stack_space < 0) ||
+ (p->ainsn.stack_space > MAX_STACK_SIZE))
+ return -EINVAL;
+
+ return 0;
+}
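+
+/*
+ * Illustrative sketch only (not part of this file): a minimal kprobes
+ * user exercising the preparation path above. The probed symbol and all
+ * names here are hypothetical examples.
+ *
+ *	static int example_pre(struct kprobe *p, struct pt_regs *regs)
+ *	{
+ *		pr_info("probe hit, pc=%08lx\n", regs->ARM_pc);
+ *		return 0;	// 0: continue with single-stepping
+ *	}
+ *
+ *	static struct kprobe example_kp = {
+ *		.symbol_name = "do_fork",	// any probe-able symbol
+ *		.pre_handler = example_pre,
+ *	};
+ *
+ *	// register_kprobe() calls arch_prepare_kprobe() above and fails
+ *	// with -EINVAL if the instruction cannot be safely probed.
+ *	err = register_kprobe(&example_kp);
+ */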
+
+void __kprobes arch_arm_kprobe(struct kprobe *p)
+{
+ unsigned int brkp;
+ void *addr;
+
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ /* Remove any Thumb flag */
+ addr = (void *)((uintptr_t)p->addr & ~1);
+
+ if (is_wide_instruction(p->opcode))
+ brkp = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION;
+ else
+ brkp = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION;
+ } else {
+ kprobe_opcode_t insn = p->opcode;
+
+ addr = p->addr;
+ brkp = KPROBE_ARM_BREAKPOINT_INSTRUCTION;
+
+ if (insn >= 0xe0000000)
+ brkp |= 0xe0000000; /* Unconditional instruction */
+ else
+ brkp |= insn & 0xf0000000; /* Copy condition from insn */
+ }
+
+ patch_text(addr, brkp);
+}
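+
+/*
+ * Worked example of the encoding above: probing the conditional
+ * instruction 'addne r0, r0, #1' (0x12800001, condition field 0x1) gives
+ *
+ *	brkp = 0x07f001f8 | (0x12800001 & 0xf0000000) = 0x17f001f8
+ *
+ * i.e. the breakpoint inherits the NE condition of the instruction it
+ * replaces, while unconditional instructions (>= 0xe0000000) get an
+ * unconditional breakpoint.
+ */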
+
+/*
+ * The actual disarming is done here on each CPU and synchronized using
+ * stop_machine. This synchronization is necessary on SMP to avoid removing
+ * a probe between the moment the 'Undefined Instruction' exception is raised
+ * and the moment the exception handler reads the faulting instruction from
+ * memory. It is also needed to atomically set the two half-words of a 32-bit
+ * Thumb breakpoint.
+ */
+struct patch {
+ void *addr;
+ unsigned int insn;
+};
+
+static int __kprobes_remove_breakpoint(void *data)
+{
+ struct patch *p = data;
+ __patch_text(p->addr, p->insn);
+ return 0;
+}
+
+void __kprobes kprobes_remove_breakpoint(void *addr, unsigned int insn)
+{
+ struct patch p = {
+ .addr = addr,
+ .insn = insn,
+ };
+ stop_machine_cpuslocked(__kprobes_remove_breakpoint, &p,
+ cpu_online_mask);
+}
+
+void __kprobes arch_disarm_kprobe(struct kprobe *p)
+{
+ kprobes_remove_breakpoint((void *)((uintptr_t)p->addr & ~1),
+ p->opcode);
+}
+
+void __kprobes arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+
+static void __kprobes set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+
+static void __kprobes
+singlestep_skip(struct kprobe *p, struct pt_regs *regs)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
+ if (is_wide_instruction(p->opcode))
+ regs->ARM_pc += 4;
+ else
+ regs->ARM_pc += 2;
+#else
+ regs->ARM_pc += 4;
+#endif
+}
+
+static inline void __kprobes
+singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
+{
+ p->ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
+}
+
+/*
+ * Called with IRQs disabled. IRQs must remain disabled from that point
+ * all the way until processing this kprobe is complete. The current
+ * kprobes implementation cannot process more than one nested level of
+ * kprobe, and that level is reserved for user kprobe handlers, so we can't
+ * risk encountering a new kprobe in an interrupt handler.
+ */
+void __kprobes kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p, *cur;
+ struct kprobe_ctlblk *kcb;
+
+ kcb = get_kprobe_ctlblk();
+ cur = kprobe_running();
+
+#ifdef CONFIG_THUMB2_KERNEL
+	/*
+	 * First look for a probe which was registered using an address with
+	 * bit 0 set; this is the usual situation for pointers to Thumb code.
+	 * If none is found, fall back to looking for one with bit 0 clear.
+	 */
+ p = get_kprobe((kprobe_opcode_t *)(regs->ARM_pc | 1));
+ if (!p)
+ p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+
+#else /* ! CONFIG_THUMB2_KERNEL */
+ p = get_kprobe((kprobe_opcode_t *)regs->ARM_pc);
+#endif
+
+ if (p) {
+ if (!p->ainsn.insn_check_cc(regs->ARM_cpsr)) {
+ /*
+ * Probe hit but conditional execution check failed,
+ * so just skip the instruction and continue as if
+ * nothing had happened.
+ * In this case, we can skip recursing check too.
+ */
+ singlestep_skip(p, regs);
+ } else if (cur) {
+ /* Kprobe is pending, so we're recursing. */
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_SS:
+ /* A pre- or post-handler probe got us here. */
+ kprobes_inc_nmissed_count(p);
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ singlestep(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ break;
+ case KPROBE_REENTER:
+			/* A nested probe was hit in FIQ; this is a bug */
+ pr_warn("Unrecoverable kprobe detected.\n");
+ dump_kprobe(p);
+ /* fall through */
+ default:
+ /* impossible cases */
+ BUG();
+ }
+ } else {
+ /* Probe hit and conditional execution check ok. */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+			/*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it has
+			 * already modified the execution path and there is
+			 * no need to single-step; just reset the current
+			 * kprobe and exit.
+			 */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ singlestep(p, regs, kcb);
+ if (p->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ }
+ reset_current_kprobe();
+ }
+ } else {
+ /*
+ * The probe was removed and a race is in progress.
+ * There is nothing we can do about it. Let's restart
+ * the instruction. By the time we can restart, the
+ * real instruction will be there.
+ */
+ }
+}
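+
+/*
+ * Summary of the kprobe_status transitions driven above:
+ *
+ *	normal hit:	HIT_ACTIVE -> [pre_handler] -> HIT_SS ->
+ *			singlestep -> HIT_SSDONE -> [post_handler]
+ *	recursive hit:	save state -> REENTER -> singlestep -> restore
+ *	cc failed:	instruction skipped, no handlers run
+ */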
+
+static int __kprobes kprobe_trap_handler(struct pt_regs *regs, unsigned int instr)
+{
+ unsigned long flags;
+ local_irq_save(flags);
+ kprobe_handler(regs);
+ local_irq_restore(flags);
+ return 0;
+}
+
+int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+ * stepped caused a page fault. We reset the current
+ * kprobe and the PC to point back to the probe address
+ * and allow the page fault handler to continue as a
+ * normal page fault.
+ */
+ regs->ARM_pc = (long)cur->addr;
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ reset_current_kprobe();
+ }
+ break;
+
+ case KPROBE_HIT_ACTIVE:
+ case KPROBE_HIT_SSDONE:
+		/*
+		 * We increment the nmissed count for accounting; the
+		 * npre/npostfault counts could also be used to account
+		 * for these specific fault cases.
+		 */
+ kprobes_inc_nmissed_count(cur);
+
+		/*
+		 * We come here because instructions in the pre/post
+		 * handler caused the page fault. This could happen if
+		 * the handler accesses user space via copy_from_user(),
+		 * get_user(), etc. Let the user-specified fault handler
+		 * try to fix it.
+		 */
+ if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
+ return 1;
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+	/*
+	 * notify_die() is currently never called on ARM,
+	 * so this callback is empty for now.
+	 */
+ return NOTIFY_DONE;
+}
+
+/*
+ * When a retprobed function returns, trampoline_handler() is called,
+ * calling the kretprobe's handler. We construct a struct pt_regs to
+ * give a view of registers r0-r11 to the user return-handler. This is
+ * not a complete pt_regs structure, but it should be sufficient for
+ * kretprobe handlers, which are normally only interested in r0 anyway.
+ */
+void __naked __kprobes kretprobe_trampoline(void)
+{
+ __asm__ __volatile__ (
+ "stmdb sp!, {r0 - r11} \n\t"
+ "mov r0, sp \n\t"
+ "bl trampoline_handler \n\t"
+ "mov lr, r0 \n\t"
+ "ldmia sp!, {r0 - r11} \n\t"
+#ifdef CONFIG_THUMB2_KERNEL
+ "bx lr \n\t"
+#else
+ "mov pc, lr \n\t"
+#endif
+ : : : "memory");
+}
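+
+/*
+ * The 'stmdb sp!, {r0 - r11}' above leaves sp pointing at a partial
+ * struct pt_regs whose first twelve words line up with uregs[0..11];
+ * that pointer is passed to trampoline_handler() in r0. The handler
+ * returns the original return address, which is moved into lr and
+ * branched to after the registers are restored.
+ */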
+
+/* Called from kretprobe_trampoline */
+static __used __kprobes void *trampoline_handler(struct pt_regs *regs)
+{
+ struct kretprobe_instance *ri = NULL;
+ struct hlist_head *head, empty_rp;
+ struct hlist_node *tmp;
+ unsigned long flags, orig_ret_address = 0;
+ unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;
+ kprobe_opcode_t *correct_ret_addr = NULL;
+
+ INIT_HLIST_HEAD(&empty_rp);
+ kretprobe_hash_lock(current, &head, &flags);
+
+ /*
+ * It is possible to have multiple instances associated with a given
+ * task either because multiple functions in the call path have
+ * a return probe installed on them, and/or more than one return
+ * probe was registered for a target function.
+ *
+ * We can handle this because:
+ * - instances are always inserted at the head of the list
+ * - when multiple return probes are registered for the same
+ * function, the first instance's ret_addr will point to the
+ * real return address, and all the rest will point to
+ * kretprobe_trampoline
+ */
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_assert(ri, orig_ret_address, trampoline_address);
+
+ correct_ret_addr = ri->ret_addr;
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
+ if (ri->task != current)
+ /* another task is sharing our hash bucket */
+ continue;
+
+ orig_ret_address = (unsigned long)ri->ret_addr;
+ if (ri->rp && ri->rp->handler) {
+ __this_cpu_write(current_kprobe, &ri->rp->kp);
+ get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
+ ri->ret_addr = correct_ret_addr;
+ ri->rp->handler(ri, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ recycle_rp_inst(ri, &empty_rp);
+
+ if (orig_ret_address != trampoline_address)
+ /*
+ * This is the real return address. Any other
+ * instances associated with this task are for
+ * other calls deeper on the call stack
+ */
+ break;
+ }
+
+ kretprobe_hash_unlock(current, &flags);
+
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
+ hlist_del(&ri->hlist);
+ kfree(ri);
+ }
+
+ return (void *)orig_ret_address;
+}
+
+void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *)regs->ARM_lr;
+
+ /* Replace the return addr with trampoline addr. */
+ regs->ARM_lr = (unsigned long)&kretprobe_trampoline;
+}
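+
+/*
+ * Illustrative sketch only: a minimal kretprobe user of the path above.
+ * The probed symbol and all names here are hypothetical examples.
+ *
+ *	static int example_ret(struct kretprobe_instance *ri,
+ *			       struct pt_regs *regs)
+ *	{
+ *		// on ARM the return value is in r0 at this point
+ *		pr_info("returned %ld\n", regs_return_value(regs));
+ *		return 0;
+ *	}
+ *
+ *	static struct kretprobe example_rp = {
+ *		.kp.symbol_name	= "dentry_open",	// example target
+ *		.handler	= example_ret,
+ *		.maxactive	= 20,	// concurrent instances to track
+ *	};
+ *
+ *	err = register_kretprobe(&example_rp);
+ */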
+
+int __kprobes arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+static struct undef_hook kprobes_thumb16_break_hook = {
+ .instr_mask = 0xffff,
+ .instr_val = KPROBE_THUMB16_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+static struct undef_hook kprobes_thumb32_break_hook = {
+ .instr_mask = 0xffffffff,
+ .instr_val = KPROBE_THUMB32_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+#else /* !CONFIG_THUMB2_KERNEL */
+
+static struct undef_hook kprobes_arm_break_hook = {
+ .instr_mask = 0x0fffffff,
+ .instr_val = KPROBE_ARM_BREAKPOINT_INSTRUCTION,
+ .cpsr_mask = MODE_MASK,
+ .cpsr_val = SVC_MODE,
+ .fn = kprobe_trap_handler,
+};
+
+#endif /* !CONFIG_THUMB2_KERNEL */
+
+int __init arch_init_kprobes(void)
+{
+ arm_probes_decode_init();
+#ifdef CONFIG_THUMB2_KERNEL
+ register_undef_hook(&kprobes_thumb16_break_hook);
+ register_undef_hook(&kprobes_thumb32_break_hook);
+#else
+ register_undef_hook(&kprobes_arm_break_hook);
+#endif
+ return 0;
+}
+
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+ void *a = (void *)addr;
+
+ return __in_irqentry_text(addr) ||
+ in_entry_text(addr) ||
+ in_idmap_text(addr) ||
+ memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
+}
diff --git a/arch/arm/probes/kprobes/core.h b/arch/arm/probes/kprobes/core.h
new file mode 100644
index 000000000..ec5d1f20a
--- /dev/null
+++ b/arch/arm/probes/kprobes/core.h
@@ -0,0 +1,58 @@
+/*
+ * arch/arm/kernel/kprobes.h
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * Some contents moved here from arch/arm/include/asm/kprobes.h which is
+ * Copyright (C) 2006, 2007 Motorola Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+
+#ifndef _ARM_KERNEL_KPROBES_H
+#define _ARM_KERNEL_KPROBES_H
+
+#include <asm/kprobes.h>
+#include "../decode.h"
+
+/*
+ * These undefined instructions must be unique and
+ * reserved solely for kprobes' use.
+ */
+#define KPROBE_ARM_BREAKPOINT_INSTRUCTION 0x07f001f8
+#define KPROBE_THUMB16_BREAKPOINT_INSTRUCTION 0xde18
+#define KPROBE_THUMB32_BREAKPOINT_INSTRUCTION 0xf7f0a018
+
+extern void kprobes_remove_breakpoint(void *addr, unsigned int insn);
+
+enum probes_insn __kprobes
+kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_probes_insn *asi,
+ const struct decode_header *h);
+
+typedef enum probes_insn (kprobe_decode_insn_t)(probes_opcode_t,
+ struct arch_probes_insn *,
+ bool,
+ const union decode_action *,
+ const struct decode_checker *[]);
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+extern const union decode_action kprobes_t32_actions[];
+extern const union decode_action kprobes_t16_actions[];
+extern const struct decode_checker *kprobes_t32_checkers[];
+extern const struct decode_checker *kprobes_t16_checkers[];
+#else /* !CONFIG_THUMB2_KERNEL */
+
+extern const union decode_action kprobes_arm_actions[];
+extern const struct decode_checker *kprobes_arm_checkers[];
+
+#endif
+
+#endif /* _ARM_KERNEL_KPROBES_H */
diff --git a/arch/arm/probes/kprobes/opt-arm.c b/arch/arm/probes/kprobes/opt-arm.c
new file mode 100644
index 000000000..cf08cb726
--- /dev/null
+++ b/arch/arm/probes/kprobes/opt-arm.c
@@ -0,0 +1,372 @@
+/*
+ * Kernel Probes Jump Optimization (Optprobes)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) Huawei Inc., 2014
+ */
+
+#include <linux/kprobes.h>
+#include <linux/jump_label.h>
+#include <asm/kprobes.h>
+#include <asm/cacheflush.h>
+/* for arm_gen_branch */
+#include <asm/insn.h>
+/* for patch_text */
+#include <asm/patch.h>
+
+#include "core.h"
+
+/*
+ * See register_usage_flags. If the probed instruction doesn't use PC,
+ * we can copy it into the template and have it executed directly,
+ * without simulation or emulation.
+ */
+#define ARM_REG_PC 15
+#define can_kprobe_direct_exec(m) (!test_bit(ARM_REG_PC, &(m)))
+
+/*
+ * NOTE: the first sub and add instructions below will be modified
+ * according to the stack cost of the probed instruction.
+ */
+asm (
+ ".global optprobe_template_entry\n"
+ "optprobe_template_entry:\n"
+ ".global optprobe_template_sub_sp\n"
+ "optprobe_template_sub_sp:"
+ " sub sp, sp, #0xff\n"
+ " stmia sp, {r0 - r14} \n"
+ ".global optprobe_template_add_sp\n"
+ "optprobe_template_add_sp:"
+ " add r3, sp, #0xff\n"
+ " str r3, [sp, #52]\n"
+ " mrs r4, cpsr\n"
+ " str r4, [sp, #64]\n"
+ " mov r1, sp\n"
+ " ldr r0, 1f\n"
+ " ldr r2, 2f\n"
+	/*
+	 * The AEABI requires an 8-byte aligned stack. If
+	 * SP % 8 != 0 (SP % 4 == 0 should be guaranteed),
+	 * allocate 4 more bytes here.
+	 */
+ " and r4, sp, #4\n"
+ " sub sp, sp, r4\n"
+#if __LINUX_ARM_ARCH__ >= 5
+ " blx r2\n"
+#else
+ " mov lr, pc\n"
+ " mov pc, r2\n"
+#endif
+ " add sp, sp, r4\n"
+ " ldr r1, [sp, #64]\n"
+ " tst r1, #"__stringify(PSR_T_BIT)"\n"
+ " ldrne r2, [sp, #60]\n"
+ " orrne r2, #1\n"
+ " strne r2, [sp, #60] @ set bit0 of PC for thumb\n"
+ " msr cpsr_cxsf, r1\n"
+ ".global optprobe_template_restore_begin\n"
+ "optprobe_template_restore_begin:\n"
+ " ldmia sp, {r0 - r15}\n"
+ ".global optprobe_template_restore_orig_insn\n"
+ "optprobe_template_restore_orig_insn:\n"
+ " nop\n"
+ ".global optprobe_template_restore_end\n"
+ "optprobe_template_restore_end:\n"
+ " nop\n"
+ ".global optprobe_template_val\n"
+ "optprobe_template_val:\n"
+ "1: .long 0\n"
+ ".global optprobe_template_call\n"
+ "optprobe_template_call:\n"
+ "2: .long 0\n"
+ ".global optprobe_template_end\n"
+ "optprobe_template_end:\n");
+
+#define TMPL_VAL_IDX \
+ ((unsigned long *)optprobe_template_val - (unsigned long *)optprobe_template_entry)
+#define TMPL_CALL_IDX \
+ ((unsigned long *)optprobe_template_call - (unsigned long *)optprobe_template_entry)
+#define TMPL_END_IDX \
+ ((unsigned long *)optprobe_template_end - (unsigned long *)optprobe_template_entry)
+#define TMPL_ADD_SP \
+ ((unsigned long *)optprobe_template_add_sp - (unsigned long *)optprobe_template_entry)
+#define TMPL_SUB_SP \
+ ((unsigned long *)optprobe_template_sub_sp - (unsigned long *)optprobe_template_entry)
+#define TMPL_RESTORE_BEGIN \
+ ((unsigned long *)optprobe_template_restore_begin - (unsigned long *)optprobe_template_entry)
+#define TMPL_RESTORE_ORIGN_INSN \
+ ((unsigned long *)optprobe_template_restore_orig_insn - (unsigned long *)optprobe_template_entry)
+#define TMPL_RESTORE_END \
+ ((unsigned long *)optprobe_template_restore_end - (unsigned long *)optprobe_template_entry)
+
+/*
+ * With the ARM ISA we can always optimize an instruction, except for
+ * instructions like 'str r0, [sp, r1]' which store to the stack with a
+ * stack space consumption that cannot be determined statically.
+ */
+int arch_prepared_optinsn(struct arch_optimized_insn *optinsn)
+{
+ return optinsn->insn != NULL;
+}
+
+/*
+ * In the ARM ISA, an optimized kprobe always replaces exactly one
+ * instruction (4 bytes aligned and 4 bytes long), so it is impossible
+ * to encounter another kprobe in the address range. Always return 0.
+ */
+int arch_check_optimized_kprobe(struct optimized_kprobe *op)
+{
+ return 0;
+}
+
+/* Caller must ensure addr & 3 == 0 */
+static int can_optimize(struct kprobe *kp)
+{
+ if (kp->ainsn.stack_space < 0)
+ return 0;
+	/*
+	 * 255 is the biggest immediate that can be used in
+	 * 'sub r0, r0, #<imm>'; numbers larger than 255 need
+	 * special encoding.
+	 */
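+	/*
+	 * (On 32-bit ARM sizeof(struct pt_regs) is 18 words, i.e. 72
+	 * bytes, so this effectively caps ainsn.stack_space at
+	 * 255 - 72 = 183 bytes.)
+	 */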
+ if (kp->ainsn.stack_space > 255 - sizeof(struct pt_regs))
+ return 0;
+ return 1;
+}
+
+/* Free optimized instruction slot */
+static void
+__arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty)
+{
+ if (op->optinsn.insn) {
+ free_optinsn_slot(op->optinsn.insn, dirty);
+ op->optinsn.insn = NULL;
+ }
+}
+
+extern void kprobe_handler(struct pt_regs *regs);
+
+static void
+optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs)
+{
+ unsigned long flags;
+ struct kprobe *p = &op->kp;
+ struct kprobe_ctlblk *kcb;
+
+ /* Save skipped registers */
+ regs->ARM_pc = (unsigned long)op->kp.addr;
+ regs->ARM_ORIG_r0 = ~0UL;
+
+ local_irq_save(flags);
+ kcb = get_kprobe_ctlblk();
+
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(&op->kp);
+ } else {
+ __this_cpu_write(current_kprobe, &op->kp);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ opt_pre_handler(&op->kp, regs);
+ __this_cpu_write(current_kprobe, NULL);
+ }
+
+ /*
+ * We singlestep the replaced instruction only when it can't be
+ * executed directly during restore.
+ */
+ if (!p->ainsn.kprobe_direct_exec)
+ op->kp.ainsn.insn_singlestep(p->opcode, &p->ainsn, regs);
+
+ local_irq_restore(flags);
+}
+NOKPROBE_SYMBOL(optimized_callback)
+
+int arch_prepare_optimized_kprobe(struct optimized_kprobe *op, struct kprobe *orig)
+{
+ kprobe_opcode_t *code;
+ unsigned long rel_chk;
+ unsigned long val;
+ unsigned long stack_protect = sizeof(struct pt_regs);
+
+ if (!can_optimize(orig))
+ return -EILSEQ;
+
+ code = get_optinsn_slot();
+ if (!code)
+ return -ENOMEM;
+
+	/*
+	 * Verify that the address gap is within the 32MiB range, because
+	 * this uses a relative jump.
+	 *
+	 * kprobe opt uses a 'b' instruction to branch to optinsn.insn.
+	 * According to the ARM manual, the branch instruction is:
+	 *
+	 *   31  28 27           24 23             0
+	 *  +------+---+---+---+---+----------------+
+	 *  | cond | 1 | 0 | 1 | 0 |      imm24     |
+	 *  +------+---+---+---+---+----------------+
+	 *
+	 * imm24 is a signed 24-bit integer. The real branch offset is
+	 * computed by: imm32 = SignExtend(imm24:'00', 32);
+	 *
+	 * So the maximum forward branch is:
+	 *   (0x007fffff << 2) = 0x01fffffc =  0x1fffffc
+	 * and the maximum backward branch is:
+	 *   (0xff800000 << 2) = 0xfe000000 = -0x2000000
+	 *
+	 * We can simply check (rel & 0xfe000003):
+	 *  if rel is positive, (rel & 0xfe000000) should be 0
+	 *  if rel is negative, (rel & 0xfe000000) should be 0xfe000000
+	 *  the last '3' is used for alignment checking.
+	 */
+ rel_chk = (unsigned long)((long)code -
+ (long)orig->addr + 8) & 0xfe000003;
+
+ if ((rel_chk != 0) && (rel_chk != 0xfe000000)) {
+		/*
+		 * Unlike x86, we free the code buffer directly instead of
+		 * calling __arch_remove_optimized_kprobe(), because we
+		 * have not filled in any fields of op yet.
+		 */
+ free_optinsn_slot(code, 0);
+ return -ERANGE;
+ }
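+
+	/*
+	 * Worked example: a probe at 0xc0008000 with a slot at 0xc1008000
+	 * gives rel_chk = (0xc1008000 - 0xc0008000 + 8) & 0xfe000003
+	 * = 0x01000008 & 0xfe000003 = 0, so the slot is reachable with a
+	 * single 'b' instruction.
+	 */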
+
+ /* Copy arch-dep-instance from template. */
+ memcpy(code, (unsigned long *)optprobe_template_entry,
+ TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+ /* Adjust buffer according to instruction. */
+ BUG_ON(orig->ainsn.stack_space < 0);
+
+ stack_protect += orig->ainsn.stack_space;
+
+ /* Should have been filtered by can_optimize(). */
+ BUG_ON(stack_protect > 255);
+
+ /* Create a 'sub sp, sp, #<stack_protect>' */
+ code[TMPL_SUB_SP] = __opcode_to_mem_arm(0xe24dd000 | stack_protect);
+ /* Create a 'add r3, sp, #<stack_protect>' */
+ code[TMPL_ADD_SP] = __opcode_to_mem_arm(0xe28d3000 | stack_protect);
+
+ /* Set probe information */
+ val = (unsigned long)op;
+ code[TMPL_VAL_IDX] = val;
+
+ /* Set probe function call */
+ val = (unsigned long)optimized_callback;
+ code[TMPL_CALL_IDX] = val;
+
+ /* If possible, copy insn and have it executed during restore */
+ orig->ainsn.kprobe_direct_exec = false;
+ if (can_kprobe_direct_exec(orig->ainsn.register_usage_flags)) {
+ kprobe_opcode_t final_branch = arm_gen_branch(
+ (unsigned long)(&code[TMPL_RESTORE_END]),
+ (unsigned long)(op->kp.addr) + 4);
+ if (final_branch != 0) {
+			/*
+			 * Replace the original 'ldmia sp, {r0 - r15}' with
+			 * 'ldmia sp, {r0 - r14}', restoring all registers
+			 * except pc.
+			 */
+ code[TMPL_RESTORE_BEGIN] = __opcode_to_mem_arm(0xe89d7fff);
+
+ /* The original probed instruction */
+ code[TMPL_RESTORE_ORIGN_INSN] = __opcode_to_mem_arm(orig->opcode);
+
+ /* Jump back to next instruction */
+ code[TMPL_RESTORE_END] = __opcode_to_mem_arm(final_branch);
+ orig->ainsn.kprobe_direct_exec = true;
+ }
+ }
+
+ flush_icache_range((unsigned long)code,
+ (unsigned long)(&code[TMPL_END_IDX]));
+
+	/* A non-NULL op->optinsn.insn means this kprobe is prepared. */
+ op->optinsn.insn = code;
+ return 0;
+}
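+
+/*
+ * Sketch of the finished instruction slot (descriptive only):
+ *
+ *	sub	sp, sp, #stack_protect	@ patched at TMPL_SUB_SP
+ *	stmia	sp, {r0 - r14}		@ build the pt_regs view
+ *	add	r3, sp, #stack_protect	@ patched at TMPL_ADD_SP
+ *	...				@ save pc/cpsr, call optimized_callback
+ *	ldmia	sp, {r0 - r15}		@ or {r0 - r14} + original insn +
+ *					@ branch back, when direct execution
+ *					@ is possible
+ *	.long	<op>			@ TMPL_VAL_IDX
+ *	.long	optimized_callback	@ TMPL_CALL_IDX
+ */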
+
+void __kprobes arch_optimize_kprobes(struct list_head *oplist)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ list_for_each_entry_safe(op, tmp, oplist, list) {
+ unsigned long insn;
+ WARN_ON(kprobe_disabled(&op->kp));
+
+		/*
+		 * Back up the instructions which will be replaced
+		 * by the jump.
+		 */
+ memcpy(op->optinsn.copied_insn, op->kp.addr,
+ RELATIVEJUMP_SIZE);
+
+ insn = arm_gen_branch((unsigned long)op->kp.addr,
+ (unsigned long)op->optinsn.insn);
+ BUG_ON(insn == 0);
+
+		/*
+		 * Make it a conditional branch if the replaced insn
+		 * is conditional.
+		 */
+ insn = (__mem_to_opcode_arm(
+ op->optinsn.copied_insn[0]) & 0xf0000000) |
+ (insn & 0x0fffffff);
+
+		/*
+		 * As in __arch_disarm_kprobe, operations which remove
+		 * breakpoints must be wrapped in stop_machine to avoid
+		 * racing.
+		 */
+ kprobes_remove_breakpoint(op->kp.addr, insn);
+
+ list_del_init(&op->list);
+ }
+}
+
+void arch_unoptimize_kprobe(struct optimized_kprobe *op)
+{
+ arch_arm_kprobe(&op->kp);
+}
+
+/*
+ * Recover the original instructions and breakpoints from the relative
+ * jumps. The caller must hold kprobe_mutex.
+ */
+void arch_unoptimize_kprobes(struct list_head *oplist,
+ struct list_head *done_list)
+{
+ struct optimized_kprobe *op, *tmp;
+
+ list_for_each_entry_safe(op, tmp, oplist, list) {
+ arch_unoptimize_kprobe(op);
+ list_move(&op->list, done_list);
+ }
+}
+
+int arch_within_optimized_kprobe(struct optimized_kprobe *op,
+ unsigned long addr)
+{
+ return ((unsigned long)op->kp.addr <= addr &&
+ (unsigned long)op->kp.addr + RELATIVEJUMP_SIZE > addr);
+}
+
+void arch_remove_optimized_kprobe(struct optimized_kprobe *op)
+{
+ __arch_remove_optimized_kprobe(op, 1);
+}
diff --git a/arch/arm/probes/kprobes/test-arm.c b/arch/arm/probes/kprobes/test-arm.c
new file mode 100644
index 000000000..8866aedfd
--- /dev/null
+++ b/arch/arm/probes/kprobes/test-arm.c
@@ -0,0 +1,1366 @@
+/*
+ * arch/arm/kernel/kprobes-test-arm.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/system_info.h>
+#include <asm/opcodes.h>
+#include <asm/probes.h>
+
+#include "test-core.h"
+
+
+#define TEST_ISA "32"
+
+#define TEST_ARM_TO_THUMB_INTERWORK_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_REG(14, 99f) \
+ TEST_ARG_END("") \
+ "50: nop \n\t" \
+ "1: "code1 #reg code2" \n\t" \
+ " bx lr \n\t" \
+ ".thumb \n\t" \
+ "3: adr lr, 2f \n\t" \
+ " bx lr \n\t" \
+ ".arm \n\t" \
+ "2: nop \n\t" \
+ TESTCASE_END
+
+#define TEST_ARM_TO_THUMB_INTERWORK_P(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_PTR(reg, val) \
+ TEST_ARG_REG(14, 99f) \
+ TEST_ARG_MEM(15, 3f+1) \
+ TEST_ARG_END("") \
+ "50: nop \n\t" \
+ "1: "code1 #reg code2" \n\t" \
+ " bx lr \n\t" \
+ ".thumb \n\t" \
+ "3: adr lr, 2f \n\t" \
+ " bx lr \n\t" \
+ ".arm \n\t" \
+ "2: nop \n\t" \
+ TESTCASE_END
+
+
+void kprobe_arm_test_cases(void)
+{
+ kprobe_test_flags = 0;
+
+ TEST_GROUP("Data-processing (register), (register-shifted register), (immediate)")
+
+#define _DATA_PROCESSING_DNM(op,s,val) \
+ TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "") \
+ TEST_RR( op "ne" s " r1, r",1, VAL1,", r",2, val, ", lsl #3") \
+ TEST_RR( op "cs" s " r2, r",3, VAL1,", r",2, val, ", lsr #4") \
+ TEST_RR( op "cc" s " r3, r",3, VAL1,", r",2, val, ", asr #5") \
+ TEST_RR( op "mi" s " r4, r",5, VAL1,", r",2, N(val),", asr #6") \
+ TEST_RR( op "pl" s " r5, r",5, VAL1,", r",2, val, ", ror #7") \
+ TEST_RR( op "vs" s " r6, r",7, VAL1,", r",2, val, ", rrx") \
+ TEST_R( op "vc" s " r6, r",7, VAL1,", pc, lsl #3") \
+ TEST_R( op "vc" s " r6, r",7, VAL1,", sp, lsr #4") \
+ TEST_R( op "vc" s " r6, pc, r",7, VAL1,", asr #5") \
+ TEST_R( op "vc" s " r6, sp, r",7, VAL1,", ror #6") \
+ TEST_RRR( op "hi" s " r8, r",9, VAL1,", r",14,val, ", lsl r",0, 3,"")\
+ TEST_RRR( op "ls" s " r9, r",9, VAL1,", r",14,val, ", lsr r",7, 4,"")\
+ TEST_RRR( op "ge" s " r10, r",11,VAL1,", r",14,val, ", asr r",7, 5,"")\
+ TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
+ TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
+ TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
+ TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
+ TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
+ TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
+ TEST( op s " r4, pc" ", #0x00005a00")
+
+#define DATA_PROCESSING_DNM(op,val) \
+ _DATA_PROCESSING_DNM(op,"",val) \
+ _DATA_PROCESSING_DNM(op,"s",val)
+
+#define DATA_PROCESSING_NM(op,val) \
+ TEST_RR( op "ne r",1, VAL1,", r",2, val, "") \
+ TEST_RR( op "eq r",1, VAL1,", r",2, val, ", lsl #3") \
+ TEST_RR( op "cc r",3, VAL1,", r",2, val, ", lsr #4") \
+ TEST_RR( op "cs r",3, VAL1,", r",2, val, ", asr #5") \
+ TEST_RR( op "pl r",5, VAL1,", r",2, N(val),", asr #6") \
+ TEST_RR( op "mi r",5, VAL1,", r",2, val, ", ror #7") \
+ TEST_RR( op "vc r",7, VAL1,", r",2, val, ", rrx") \
+ TEST_R ( op "vs r",7, VAL1,", pc, lsl #3") \
+ TEST_R ( op "vs r",7, VAL1,", sp, lsr #4") \
+ TEST_R( op "vs pc, r",7, VAL1,", asr #5") \
+ TEST_R( op "vs sp, r",7, VAL1,", ror #6") \
+ TEST_RRR( op "ls r",9, VAL1,", r",14,val, ", lsl r",0, 3,"") \
+ TEST_RRR( op "hi r",9, VAL1,", r",14,val, ", lsr r",7, 4,"") \
+ TEST_RRR( op "lt r",11,VAL1,", r",14,val, ", asr r",7, 5,"") \
+ TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \
+ TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \
+ TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \
+ TEST_R( op "eq r",11,VAL1,", #0xf5") \
+ TEST_R( op "ne r",0, VAL1,", #0xf5000000") \
+ TEST_R( op " r",8, VAL2,", #0x000af000")
+
+#define _DATA_PROCESSING_DM(op,s,val) \
+ TEST_R( op "eq" s " r0, r",1, val, "") \
+ TEST_R( op "ne" s " r1, r",1, val, ", lsl #3") \
+ TEST_R( op "cs" s " r2, r",3, val, ", lsr #4") \
+ TEST_R( op "cc" s " r3, r",3, val, ", asr #5") \
+ TEST_R( op "mi" s " r4, r",5, N(val),", asr #6") \
+ TEST_R( op "pl" s " r5, r",5, val, ", ror #7") \
+ TEST_R( op "vs" s " r6, r",10,val, ", rrx") \
+ TEST( op "vs" s " r7, pc, lsl #3") \
+ TEST( op "vs" s " r7, sp, lsr #4") \
+ TEST_RR( op "vc" s " r8, r",7, val, ", lsl r",0, 3,"") \
+ TEST_RR( op "hi" s " r9, r",9, val, ", lsr r",7, 4,"") \
+ TEST_RR( op "ls" s " r10, r",9, val, ", asr r",7, 5,"") \
+ TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
+ TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
+ TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
+ TEST( op "eq" s " r0, #0xf5") \
+ TEST( op "ne" s " r11, #0xf5000000") \
+ TEST( op s " r7, #0x000af000") \
+ TEST( op s " r4, #0x00005a00")
+
+#define DATA_PROCESSING_DM(op,val) \
+ _DATA_PROCESSING_DM(op,"",val) \
+ _DATA_PROCESSING_DM(op,"s",val)
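+
+/*
+ * Reading the test macros above: each TEST_* fragment list is pasted into
+ * a single instruction string, and the numeric arguments name registers to
+ * be pre-loaded with the given values. For example, inside
+ * DATA_PROCESSING_DNM("and", 0xf00f00ff) the line
+ *
+ *	TEST_RR( op "eq" s " r0, r",1, VAL1,", r",2, val, "")
+ *
+ * produces (for the "s" variant) a test of 'andeqs r0, r1, r2' run with
+ * r1 = VAL1 and r2 = 0xf00f00ff, executed both natively and under the
+ * kprobes simulation so the results can be compared.
+ */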
+
+ DATA_PROCESSING_DNM("and",0xf00f00ff)
+ DATA_PROCESSING_DNM("eor",0xf00f00ff)
+ DATA_PROCESSING_DNM("sub",VAL2)
+ DATA_PROCESSING_DNM("rsb",VAL2)
+ DATA_PROCESSING_DNM("add",VAL2)
+ DATA_PROCESSING_DNM("adc",VAL2)
+ DATA_PROCESSING_DNM("sbc",VAL2)
+ DATA_PROCESSING_DNM("rsc",VAL2)
+ DATA_PROCESSING_NM("tst",0xf00f00ff)
+ DATA_PROCESSING_NM("teq",0xf00f00ff)
+ DATA_PROCESSING_NM("cmp",VAL2)
+ DATA_PROCESSING_NM("cmn",VAL2)
+ DATA_PROCESSING_DNM("orr",0xf00f00ff)
+ DATA_PROCESSING_DM("mov",VAL2)
+ DATA_PROCESSING_DNM("bic",0xf00f00ff)
+ DATA_PROCESSING_DM("mvn",VAL2)
+
+ TEST("mov ip, sp") /* This has special case emulation code */
+
+ TEST_SUPPORTED("mov pc, #0x1000");
+ TEST_SUPPORTED("mov sp, #0x1000");
+ TEST_SUPPORTED("cmp pc, #0x1000");
+ TEST_SUPPORTED("cmp sp, #0x1000");
+
+ /* Data-processing with PC and a shift count in a register */
+ TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc")
+ TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc")
+ TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc")
+ TEST_UNSUPPORTED(__inst_arm(0xe151021f) " @ cmp r1, pc, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe17f0211) " @ cmn pc, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) " @ mov r1, pc, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) " @ mov pc, r1, lsl r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe042131f) " @ sub r1, r2, pc, lsl r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) " @ bic r1, pc, r2, lsl r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe081f312) " @ add pc, r1, r2, lsl r3")
+
+ /* Data-processing with PC as a target and status registers updated */
+ TEST_UNSUPPORTED("movs pc, r1")
+ TEST_UNSUPPORTED("movs pc, r1, lsl r2")
+ TEST_UNSUPPORTED("movs pc, #0x10000")
+ TEST_UNSUPPORTED("adds pc, lr, r1")
+ TEST_UNSUPPORTED("adds pc, lr, r1, lsl r2")
+ TEST_UNSUPPORTED("adds pc, lr, #4")
+
+ /* Data-processing with SP as target */
+ TEST("add sp, sp, #16")
+ TEST("sub sp, sp, #8")
+ TEST("bic sp, sp, #0x20")
+ TEST("orr sp, sp, #0x20")
+ TEST_PR( "add sp, r",10,0,", r",11,4,"")
+ TEST_PRR("add sp, r",10,0,", r",11,4,", asl r",12,1,"")
+ TEST_P( "mov sp, r",10,0,"")
+ TEST_PR( "mov sp, r",10,0,", asl r",12,0,"")
+
+ /* Data-processing with PC as target */
+ TEST_BF( "add pc, pc, #2f-1b-8")
+ TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
+ TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
+ TEST_BF_R ("mov pc, r",0,2f,"")
+ TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
+ TEST_BB( "sub pc, pc, #1b-2b+8")
+#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
+ TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
+#endif
+ TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
+ TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
+ TEST_R( "add pc, pc, r",10,-2,", asl #1")
+#ifdef CONFIG_THUMB2_KERNEL
+ TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"")
+ TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8")
+#endif
+ TEST_GROUP("Miscellaneous instructions")
+
+ TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrspl r",7,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr")
+ TEST_UNSUPPORTED(__inst_arm(0xe10ff000) " @ mrs r15, cpsr")
+ TEST_UNSUPPORTED("mrs r0, spsr")
+ TEST_UNSUPPORTED("mrs lr, spsr")
+
+ TEST_UNSUPPORTED("msr cpsr, r0")
+ TEST_UNSUPPORTED("msr cpsr_f, lr")
+ TEST_UNSUPPORTED("msr spsr, r0")
+
+#if __LINUX_ARM_ARCH__ >= 5 || \
+ (__LINUX_ARM_ARCH__ == 4 && !defined(CONFIG_CPU_32v4))
+ TEST_BF_R("bx r",0,2f,"")
+ TEST_BB_R("bx r",7,2f,"")
+ TEST_BF_R("bxeq r",14,2f,"")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 5
+ TEST_R("clz r0, r",0, 0x0,"")
+ TEST_R("clzeq r7, r",14,0x1,"")
+ TEST_R("clz lr, r",7, 0xffffffff,"")
+ TEST( "clz r4, sp")
+ TEST_UNSUPPORTED(__inst_arm(0x016fff10) " @ clz pc, r0")
+ TEST_UNSUPPORTED(__inst_arm(0x016f0f1f) " @ clz r0, pc")
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_UNSUPPORTED("bxj r0")
+#endif
+
+ TEST_BF_R("blx r",0,2f,"")
+ TEST_BB_R("blx r",7,2f,"")
+ TEST_BF_R("blxeq r",14,2f,"")
+ TEST_UNSUPPORTED(__inst_arm(0x0120003f) " @ blx pc")
+
+ TEST_RR( "qadd r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "qaddvs lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_R( "qadd lr, r",9, VAL2,", r13")
+ TEST_RR( "qsub r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "qsubvs lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_R( "qsub lr, r",9, VAL2,", r13")
+ TEST_RR( "qdadd r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "qdaddvs lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_R( "qdadd lr, r",9, VAL2,", r13")
+ TEST_RR( "qdsub r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "qdsubvs lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_R( "qdsub lr, r",9, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe101f050) " @ qadd pc, r0, r1")
+ TEST_UNSUPPORTED(__inst_arm(0xe121f050) " @ qsub pc, r0, r1")
+ TEST_UNSUPPORTED(__inst_arm(0xe141f050) " @ qdadd pc, r0, r1")
+ TEST_UNSUPPORTED(__inst_arm(0xe161f050) " @ qdsub pc, r0, r1")
+ TEST_UNSUPPORTED(__inst_arm(0xe16f2050) " @ qdsub r2, r0, pc")
+ TEST_UNSUPPORTED(__inst_arm(0xe161205f) " @ qdsub r2, pc, r1")
+
+ TEST_UNSUPPORTED("bkpt 0xffff")
+ TEST_UNSUPPORTED("bkpt 0x0000")
+
+ TEST_UNSUPPORTED(__inst_arm(0xe1600070) " @ smc #0")
+
+ TEST_GROUP("Halfword multiply and multiply-accumulate")
+
+ TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlabbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlabb lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe10f3281) " @ smlabb pc, r1, r2, r3")
+ TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlatbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlatb lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe10f32a1) " @ smlatb pc, r1, r2, r3")
+ TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlabtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlabt lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe10f32c1) " @ smlabt pc, r1, r2, r3")
+ TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlattge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlatt lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe10f32e1) " @ smlatt pc, r1, r2, r3")
+
+ TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlawbge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlawb lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe12f3281) " @ smlawb pc, r1, r2, r3")
+ TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlawtge r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smlawt lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe12f32c1) " @ smlawt pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe12032cf) " @ smlawt r0, pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe1203fc1) " @ smlawt r0, r1, pc, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe120f2c1) " @ smlawt r0, r1, r2, pc")
+
+ TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulwbge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smulwb lr, r",1, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe12f02a1) " @ smulwb pc, r1, r2")
+ TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulwtge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smulwt lr, r",1, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe12f02e1) " @ smulwt pc, r1, r2")
+
+ TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalbble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlalbb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe14f1382) " @ smlalbb pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe141f382) " @ smlalbb r1, pc, r2, r3")
+ TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlaltble r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlaltb r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe14f13a2) " @ smlaltb pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe141f3a2) " @ smlaltb r1, pc, r2, r3")
+ TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalbtle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlalbt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe14f13c2) " @ smlalbt pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe141f3c2) " @ smlalbt r1, pc, r2, r3")
+ TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalttle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlaltt r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+	TEST_UNSUPPORTED(__inst_arm(0xe14f13e2) " @ smlaltt pc, r1, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe140f3e2) " @ smlaltt r0, pc, r2, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe14013ef) " @ smlaltt r0, r1, pc, r3")
+	TEST_UNSUPPORTED(__inst_arm(0xe1401fe2) " @ smlaltt r0, r1, r2, pc")
+
+ TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulbbge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smulbb lr, r",1, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe16f0281) " @ smulbb pc, r1, r2")
+ TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smultbge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smultb lr, r",1, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe16f02a1) " @ smultb pc, r1, r2")
+ TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulbtge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smulbt lr, r",1, VAL2,", r13")
+	TEST_UNSUPPORTED(__inst_arm(0xe16f02c1) " @ smulbt pc, r1, r2")
+ TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulttge r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_R( "smultt lr, r",1, VAL2,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
+#endif
+
+ TEST_GROUP("Multiply and multiply-accumulate")
+
+ TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "mulls r7, r",8, VAL2,", r",9, VAL2,"")
+ TEST_R( "mul lr, r",4, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe00f0291) " @ mul pc, r1, r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe000029f) " @ mul r0, pc, r2")
+ TEST_UNSUPPORTED(__inst_arm(0xe0000f91) " @ mul r0, r1, pc")
+ TEST_RR( "muls r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "mullss r7, r",8, VAL2,", r",9, VAL2,"")
+ TEST_R( "muls lr, r",4, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe01f0291) " @ muls pc, r1, r2")
+
+ TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "mlahi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "mla lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe02f3291) " @ mla pc, r1, r2, r3")
+ TEST_RRR( "mlas r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "mlahis r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "mlas lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe03f3291) " @ mlas pc, r1, r2, r3")
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_RR( "umaal r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "umaalls r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_R( "umaal lr, r12, r",11,VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe041f392) " @ umaal pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe04f0392) " @ umaal r0, pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0500090) " @ undef")
+ TEST_UNSUPPORTED(__inst_arm(0xe05fff9f) " @ undef")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "mlshi r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "mls lr, r",1, VAL2,", r",2, VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe06f3291) " @ mls pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe060329f) " @ mls r0, pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0603f91) " @ mls r0, r1, pc, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe060f291) " @ mls r0, r1, r2, pc")
+#endif
+
+ TEST_UNSUPPORTED(__inst_arm(0xe0700090) " @ undef")
+ TEST_UNSUPPORTED(__inst_arm(0xe07fff9f) " @ undef")
+
+ TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "umullls r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_R( "umull lr, r12, r",11,VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe081f392) " @ umull pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe08f1392) " @ umull r1, pc, r2, r3")
+ TEST_RR( "umulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "umulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_R( "umulls lr, r12, r",11,VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe091f392) " @ umulls pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe09f1392) " @ umulls r1, pc, r2, r3")
+
+ TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "umlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "umlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0af1392) " @ umlal pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0a1f392) " @ umlal r1, pc, r2, r3")
+ TEST_RRRR( "umlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "umlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "umlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0bf1392) " @ umlals pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0b1f392) " @ umlals r1, pc, r2, r3")
+
+ TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "smullls r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_R( "smull lr, r12, r",11,VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0c1f392) " @ smull pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0cf1392) " @ smull r1, pc, r2, r3")
+ TEST_RR( "smulls r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "smulllss r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_R( "smulls lr, r12, r",11,VAL3,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0d1f392) " @ smulls pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0df1392) " @ smulls r1, pc, r2, r3")
+
+ TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalle r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlal r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0ef1392) " @ smlal pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0e1f392) " @ smlal r1, pc, r2, r3")
+ TEST_RRRR( "smlals r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalles r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRR( "smlals r",14,VAL3,", r",7, VAL4,", r",5, VAL1,", r13")
+ TEST_UNSUPPORTED(__inst_arm(0xe0ff1392) " @ smlals pc, r1, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0f0f392) " @ smlals r0, pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0f0139f) " @ smlals r0, r1, pc, r3")
+ TEST_UNSUPPORTED(__inst_arm(0xe0f01f92) " @ smlals r0, r1, r2, pc")
+
+ TEST_GROUP("Synchronization primitives")
+
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
+ TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+ TEST_UNSUPPORTED(__inst_arm(0xe108e097) " @ swp lr, r7, [r8]")
+ TEST_UNSUPPORTED(__inst_arm(0x610d0091) " @ swpvs r0, r1, [sp]")
+	TEST_UNSUPPORTED(__inst_arm(0xe10cd09e) " @ swp sp, r14, [r12]")
+#endif
+ TEST_UNSUPPORTED(__inst_arm(0xe102f091) " @ swp pc, r1, [r2]")
+ TEST_UNSUPPORTED(__inst_arm(0xe102009f) " @ swp r0, pc, [r2]")
+ TEST_UNSUPPORTED(__inst_arm(0xe10f0091) " @ swp r0, r1, [pc]")
+#if __LINUX_ARM_ARCH__ < 6
+ TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
+ TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+ TEST_UNSUPPORTED(__inst_arm(0xe148e097) " @ swpb lr, r7, [r8]")
+ TEST_UNSUPPORTED(__inst_arm(0x614d0091) " @ swpvsb r0, r1, [sp]")
+#endif
+ TEST_UNSUPPORTED(__inst_arm(0xe142f091) " @ swpb pc, r1, [r2]")
+
+ TEST_UNSUPPORTED(__inst_arm(0xe1100090)) /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe1200090)) /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe1300090)) /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe1500090)) /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe1600090)) /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe1700090)) /* Unallocated space */
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_UNSUPPORTED("ldrex r2, [sp]")
+#endif
+#if (__LINUX_ARM_ARCH__ >= 7) || defined(CONFIG_CPU_32v6K)
+ TEST_UNSUPPORTED("strexd r0, r2, r3, [sp]")
+ TEST_UNSUPPORTED("ldrexd r2, r3, [sp]")
+ TEST_UNSUPPORTED("strexb r0, r2, [sp]")
+ TEST_UNSUPPORTED("ldrexb r2, [sp]")
+ TEST_UNSUPPORTED("strexh r0, r2, [sp]")
+ TEST_UNSUPPORTED("ldrexh r2, [sp]")
+#endif
+ TEST_GROUP("Extra load/store instructions")
+
+ TEST_RPR( "strh r",0, VAL1,", [r",1, 48,", -r",2, 24,"]")
+ TEST_RPR( "streqh r",14,VAL2,", [r",11,0, ", r",12, 48,"]")
+ TEST_UNSUPPORTED( "streqh r14, [r13, r12]")
+ TEST_UNSUPPORTED( "streqh r14, [r12, r13]")
+ TEST_RPR( "strh r",1, VAL1,", [r",2, 24,", r",3, 48,"]!")
+ TEST_RPR( "strneh r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
+ TEST_RPR( "strh r",2, VAL1,", [r",3, 24,"], r",4, 48,"")
+ TEST_RPR( "strh r",10,VAL2,", [r",9, 48,"], -r",11,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1afc0ba) " @ strh r12, [pc, r10]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe089f0bb) " @ strh pc, [r9], r11")
+ TEST_UNSUPPORTED(__inst_arm(0xe089a0bf) " @ strh r10, [r9], pc")
+
+ TEST_PR( "ldrh r0, [r",0, 48,", -r",2, 24,"]")
+ TEST_PR( "ldrcsh r14, [r",13,0, ", r",12, 48,"]")
+ TEST_PR( "ldrh r1, [r",2, 24,", r",3, 48,"]!")
+ TEST_PR( "ldrcch r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrh r2, [r",3, 24,"], r",4, 48,"")
+ TEST_PR( "ldrh r10, [r",9, 48,"], -r",11,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1bfc0ba) " @ ldrh r12, [pc, r10]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe099f0bb) " @ ldrh pc, [r9], r11")
+ TEST_UNSUPPORTED(__inst_arm(0xe099a0bf) " @ ldrh r10, [r9], pc")
+
+ TEST_RP( "strh r",0, VAL1,", [r",1, 24,", #-2]")
+ TEST_RP( "strmih r",14,VAL2,", [r",13,0, ", #2]")
+ TEST_RP( "strh r",1, VAL1,", [r",2, 24,", #4]!")
+ TEST_RP( "strplh r",12,VAL2,", [r",11,24,", #-4]!")
+ TEST_RP( "strh r",2, VAL1,", [r",3, 24,"], #48")
+ TEST_RP( "strh r",10,VAL2,", [r",9, 64,"], #-48")
+ TEST_RP( "strh r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strh r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RP( "strh r",4, VAL1,", [r",14,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe1efc3b0) " @ strh r12, [pc, #48]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe0c9f3b0) " @ strh pc, [r9], #48")
+
+ TEST_P( "ldrh r0, [r",0, 24,", #-2]")
+ TEST_P( "ldrvsh r14, [r",13,0, ", #2]")
+ TEST_P( "ldrh r1, [r",2, 24,", #4]!")
+ TEST_P( "ldrvch r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrh r2, [r",3, 24,"], #48")
+ TEST_P( "ldrh r10, [r",9, 64,"], #-48")
+ TEST( "ldrh r0, [pc, #0]")
+ TEST_UNSUPPORTED(__inst_arm(0xe1ffc3b0) " @ ldrh r12, [pc, #48]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe0d9f3b0) " @ ldrh pc, [r9], #48")
+
+ TEST_PR( "ldrsb r0, [r",0, 48,", -r",2, 24,"]")
+ TEST_PR( "ldrhisb r14, [r",13,0,", r",12, 48,"]")
+ TEST_PR( "ldrsb r1, [r",2, 24,", r",3, 48,"]!")
+ TEST_PR( "ldrlssb r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrsb r2, [r",3, 24,"], r",4, 48,"")
+ TEST_PR( "ldrsb r10, [r",9, 48,"], -r",11,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1bfc0da) " @ ldrsb r12, [pc, r10]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe099f0db) " @ ldrsb pc, [r9], r11")
+
+ TEST_P( "ldrsb r0, [r",0, 24,", #-1]")
+ TEST_P( "ldrgesb r14, [r",13,0, ", #1]")
+ TEST_P( "ldrsb r1, [r",2, 24,", #4]!")
+ TEST_P( "ldrltsb r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrsb r2, [r",3, 24,"], #48")
+ TEST_P( "ldrsb r10, [r",9, 64,"], #-48")
+ TEST( "ldrsb r0, [pc, #0]")
+ TEST_UNSUPPORTED(__inst_arm(0xe1ffc3d0) " @ ldrsb r12, [pc, #48]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe0d9f3d0) " @ ldrsb pc, [r9], #48")
+
+ TEST_PR( "ldrsh r0, [r",0, 48,", -r",2, 24,"]")
+ TEST_PR( "ldrgtsh r14, [r",13,0, ", r",12, 48,"]")
+ TEST_PR( "ldrsh r1, [r",2, 24,", r",3, 48,"]!")
+ TEST_PR( "ldrlesh r12, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrsh r2, [r",3, 24,"], r",4, 48,"")
+ TEST_PR( "ldrsh r10, [r",9, 48,"], -r",11,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1bfc0fa) " @ ldrsh r12, [pc, r10]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe099f0fb) " @ ldrsh pc, [r9], r11")
+
+ TEST_P( "ldrsh r0, [r",0, 24,", #-1]")
+ TEST_P( "ldreqsh r14, [r",13,0 ,", #1]")
+ TEST_P( "ldrsh r1, [r",2, 24,", #4]!")
+ TEST_P( "ldrnesh r12, [r",11,24,", #-4]!")
+ TEST_P( "ldrsh r2, [r",3, 24,"], #48")
+ TEST_P( "ldrsh r10, [r",9, 64,"], #-48")
+ TEST( "ldrsh r0, [pc, #0]")
+ TEST_UNSUPPORTED(__inst_arm(0xe1ffc3f0) " @ ldrsh r12, [pc, #48]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe0d9f3f0) " @ ldrsh pc, [r9], #48")
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_UNSUPPORTED("strht r1, [r2], r3")
+ TEST_UNSUPPORTED("ldrht r1, [r2], r3")
+ TEST_UNSUPPORTED("strht r1, [r2], #48")
+ TEST_UNSUPPORTED("ldrht r1, [r2], #48")
+ TEST_UNSUPPORTED("ldrsbt r1, [r2], r3")
+ TEST_UNSUPPORTED("ldrsbt r1, [r2], #48")
+ TEST_UNSUPPORTED("ldrsht r1, [r2], r3")
+ TEST_UNSUPPORTED("ldrsht r1, [r2], #48")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 5
+ TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
+ TEST_RPR( "strccd r",8, VAL2,", [r",11,0, ", r",12,48,"]")
+ TEST_UNSUPPORTED( "strccd r8, [r13, r12]")
+ TEST_UNSUPPORTED( "strccd r8, [r12, r13]")
+ TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
+ TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
+ TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
+ TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1afc0fa) " @ strd r12, [pc, r10]!")
+
+ TEST_PR( "ldrd r0, [r",0, 48,", -r",2,24,"]")
+ TEST_PR( "ldrmid r8, [r",13,0, ", r",12,48,"]")
+ TEST_PR( "ldrd r4, [r",2, 24,", r",3, 48,"]!")
+ TEST_PR( "ldrpld r6, [r",11,48,", -r",10,24,"]!")
+ TEST_PR( "ldrd r2, [r",5, 24,"], r",4,48,"")
+ TEST_PR( "ldrd r10, [r",9,48,"], -r",7,24,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe1afc0da) " @ ldrd r12, [pc, r10]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe089f0db) " @ ldrd pc, [r9], r11")
+ TEST_UNSUPPORTED(__inst_arm(0xe089e0db) " @ ldrd lr, [r9], r11")
+ TEST_UNSUPPORTED(__inst_arm(0xe089c0df) " @ ldrd r12, [r9], pc")
+
+ TEST_RP( "strd r",0, VAL1,", [r",1, 24,", #-8]")
+ TEST_RP( "strvsd r",8, VAL2,", [r",13,0, ", #8]")
+ TEST_RP( "strd r",4, VAL1,", [r",2, 24,", #16]!")
+ TEST_RP( "strvcd r",12,VAL2,", [r",11,24,", #-16]!")
+ TEST_RP( "strd r",2, VAL1,", [r",4, 24,"], #48")
+ TEST_RP( "strd r",10,VAL2,", [r",9, 64,"], #-48")
+ TEST_RP( "strd r",6, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strd r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RP( "strd r",4, VAL1,", [r",12,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe1efc3f0) " @ strd r12, [pc, #48]!")
+
+ TEST_P( "ldrd r0, [r",0, 24,", #-8]")
+ TEST_P( "ldrhid r8, [r",13,0, ", #8]")
+ TEST_P( "ldrd r4, [r",2, 24,", #16]!")
+ TEST_P( "ldrlsd r6, [r",11,24,", #-16]!")
+ TEST_P( "ldrd r2, [r",5, 24,"], #48")
+ TEST_P( "ldrd r10, [r",9,6,"], #-48")
+ TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48")
+ TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48")
+#endif
+
+ TEST_GROUP("Miscellaneous")
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST("movw r0, #0")
+ TEST("movw r0, #0xffff")
+ TEST("movw lr, #0xffff")
+ TEST_UNSUPPORTED(__inst_arm(0xe300f000) " @ movw pc, #0")
+ TEST_R("movt r",0, VAL1,", #0")
+ TEST_R("movt r",0, VAL2,", #0xffff")
+ TEST_R("movt r",14,VAL1,", #0xffff")
+ TEST_UNSUPPORTED(__inst_arm(0xe340f000) " @ movt pc, #0")
+#endif
+
+ TEST_UNSUPPORTED("msr cpsr, 0x13")
+ TEST_UNSUPPORTED("msr cpsr_f, 0xf0000000")
+ TEST_UNSUPPORTED("msr spsr, 0x13")
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_SUPPORTED("yield")
+ TEST("sev")
+ TEST("nop")
+ TEST("wfi")
+ TEST_SUPPORTED("wfe")
+ TEST_UNSUPPORTED("dbg #0")
+#endif
+
+ TEST_GROUP("Load/store word and unsigned byte")
+
+#define LOAD_STORE(byte) \
+ TEST_RP( "str"byte" r",0, VAL1,", [r",1, 24,", #-2]") \
+ TEST_RP( "str"byte" r",14,VAL2,", [r",13,0, ", #2]") \
+ TEST_RP( "str"byte" r",1, VAL1,", [r",2, 24,", #4]!") \
+ TEST_RP( "str"byte" r",12,VAL2,", [r",11,24,", #-4]!") \
+ TEST_RP( "str"byte" r",2, VAL1,", [r",3, 24,"], #48") \
+ TEST_RP( "str"byte" r",10,VAL2,", [r",9, 64,"], #-48") \
+ TEST_RP( "str"byte" r",3, VAL1,", [r",13,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \
+ TEST_UNSUPPORTED("str"byte" r3, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_RP( "str"byte" r",4, VAL1,", [r",10,TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_RPR("str"byte" r",0, VAL1,", [r",1, 48,", -r",2, 24,"]") \
+ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 48,"]") \
+ TEST_UNSUPPORTED("str"byte" r14, [r13, r12]") \
+ TEST_UNSUPPORTED("str"byte" r14, [r12, r13]") \
+ TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 48,"]!") \
+ TEST_RPR("str"byte" r",12,VAL2,", [r",11,48,", -r",10,24,"]!") \
+ TEST_RPR("str"byte" r",2, VAL1,", [r",3, 24,"], r",4, 48,"") \
+ TEST_RPR("str"byte" r",10,VAL2,", [r",9, 48,"], -r",11,24,"") \
+ TEST_RPR("str"byte" r",0, VAL1,", [r",1, 24,", r",2, 32,", asl #1]")\
+ TEST_RPR("str"byte" r",14,VAL2,", [r",11,0, ", r",12, 32,", lsr #2]")\
+ TEST_UNSUPPORTED("str"byte" r14, [r13, r12, lsr #2]") \
+ TEST_RPR("str"byte" r",1, VAL1,", [r",2, 24,", r",3, 32,", asr #3]!")\
+ TEST_RPR("str"byte" r",12,VAL2,", [r",11,24,", r",10, 4,", ror #31]!")\
+ TEST_P( "ldr"byte" r0, [r",0, 24,", #-2]") \
+ TEST_P( "ldr"byte" r14, [r",13,0, ", #2]") \
+ TEST_P( "ldr"byte" r1, [r",2, 24,", #4]!") \
+ TEST_P( "ldr"byte" r12, [r",11,24,", #-4]!") \
+ TEST_P( "ldr"byte" r2, [r",3, 24,"], #48") \
+ TEST_P( "ldr"byte" r10, [r",9, 64,"], #-48") \
+ TEST_PR( "ldr"byte" r0, [r",0, 48,", -r",2, 24,"]") \
+ TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 48,"]") \
+ TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 48,"]!") \
+ TEST_PR( "ldr"byte" r12, [r",11,48,", -r",10,24,"]!") \
+ TEST_PR( "ldr"byte" r2, [r",3, 24,"], r",4, 48,"") \
+ TEST_PR( "ldr"byte" r10, [r",9, 48,"], -r",11,24,"") \
+ TEST_PR( "ldr"byte" r0, [r",0, 24,", r",2, 32,", asl #1]") \
+ TEST_PR( "ldr"byte" r14, [r",13,0, ", r",12, 32,", lsr #2]") \
+ TEST_PR( "ldr"byte" r1, [r",2, 24,", r",3, 32,", asr #3]!") \
+ TEST_PR( "ldr"byte" r12, [r",11,24,", r",10, 4,", ror #31]!") \
+ TEST( "ldr"byte" r0, [pc, #0]") \
+ TEST_R( "ldr"byte" r12, [pc, r",14,0,"]")
+
+ LOAD_STORE("")
+ TEST_P( "str pc, [r",0,0,", #15*4]")
+ TEST_UNSUPPORTED( "str pc, [sp, r2]")
+ TEST_BF( "ldr pc, [sp, #15*4]")
+ TEST_BF_R("ldr pc, [sp, r",2,15*4,"]")
+
+ TEST_P( "str sp, [r",0,0,", #13*4]")
+ TEST_UNSUPPORTED( "str sp, [sp, r2]")
+ TEST_BF( "ldr sp, [sp, #13*4]")
+ TEST_BF_R("ldr sp, [sp, r",2,13*4,"]")
+
+#ifdef CONFIG_THUMB2_KERNEL
+ TEST_ARM_TO_THUMB_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
+#endif
+ TEST_UNSUPPORTED(__inst_arm(0xe5af6008) " @ str r6, [pc, #8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe7af6008) " @ str r6, [pc, r8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe5bf6008) " @ ldr r6, [pc, #8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe7bf6008) " @ ldr r6, [pc, r8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe788600f) " @ str r6, [r8, pc]")
+ TEST_UNSUPPORTED(__inst_arm(0xe798600f) " @ ldr r6, [r8, pc]")
+
+ LOAD_STORE("b")
+ TEST_UNSUPPORTED(__inst_arm(0xe5f7f008) " @ ldrb pc, [r7, #8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe7f7f008) " @ ldrb pc, [r7, r8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe5ef6008) " @ strb r6, [pc, #8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe7ef6008) " @ strb r6, [pc, r3]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe5ff6008) " @ ldrb r6, [pc, #8]!")
+ TEST_UNSUPPORTED(__inst_arm(0xe7ff6008) " @ ldrb r6, [pc, r3]!")
+
+ TEST_UNSUPPORTED("ldrt r0, [r1], #4")
+ TEST_UNSUPPORTED("ldrt r1, [r2], r3")
+ TEST_UNSUPPORTED("strt r2, [r3], #4")
+ TEST_UNSUPPORTED("strt r3, [r4], r5")
+ TEST_UNSUPPORTED("ldrbt r4, [r5], #4")
+ TEST_UNSUPPORTED("ldrbt r5, [r6], r7")
+ TEST_UNSUPPORTED("strbt r6, [r7], #4")
+ TEST_UNSUPPORTED("strbt r7, [r8], r9")
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_GROUP("Parallel addition and subtraction, signed")
+
+ TEST_UNSUPPORTED(__inst_arm(0xe6000010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe60fffff) "") /* Unallocated space */
+
+ TEST_RR( "sadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cff1a) " @ sadd16 pc, r12, r10")
+ TEST_RR( "sasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cff3a) " @ sasx pc, r12, r10")
+ TEST_RR( "ssax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "ssax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cff5a) " @ ssax pc, r12, r10")
+ TEST_RR( "ssub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "ssub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cff7a) " @ ssub16 pc, r12, r10")
+ TEST_RR( "sadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cff9a) " @ sadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe61000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe61fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe61000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe61fffdf) "") /* Unallocated space */
+ TEST_RR( "ssub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "ssub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe61cfffa) " @ ssub8 pc, r12, r10")
+
+ TEST_RR( "qadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cff1a) " @ qadd16 pc, r12, r10")
+ TEST_RR( "qasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cff3a) " @ qasx pc, r12, r10")
+ TEST_RR( "qsax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qsax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cff5a) " @ qsax pc, r12, r10")
+ TEST_RR( "qsub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qsub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cff7a) " @ qsub16 pc, r12, r10")
+ TEST_RR( "qadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cff9a) " @ qadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe62000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe62fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe62000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe62fffdf) "") /* Unallocated space */
+ TEST_RR( "qsub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "qsub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe62cfffa) " @ qsub8 pc, r12, r10")
+
+ TEST_RR( "shadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cff1a) " @ shadd16 pc, r12, r10")
+ TEST_RR( "shasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cff3a) " @ shasx pc, r12, r10")
+ TEST_RR( "shsax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shsax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cff5a) " @ shsax pc, r12, r10")
+ TEST_RR( "shsub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shsub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cff7a) " @ shsub16 pc, r12, r10")
+ TEST_RR( "shadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cff9a) " @ shadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe63000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe63fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe63000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe63fffdf) "") /* Unallocated space */
+ TEST_RR( "shsub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "shsub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe63cfffa) " @ shsub8 pc, r12, r10")
+
+ TEST_GROUP("Parallel addition and subtraction, unsigned")
+
+ TEST_UNSUPPORTED(__inst_arm(0xe6400010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe64fffff) "") /* Unallocated space */
+
+ TEST_RR( "uadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cff1a) " @ uadd16 pc, r12, r10")
+ TEST_RR( "uasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cff3a) " @ uasx pc, r12, r10")
+ TEST_RR( "usax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "usax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cff5a) " @ usax pc, r12, r10")
+ TEST_RR( "usub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "usub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cff7a) " @ usub16 pc, r12, r10")
+ TEST_RR( "uadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cff9a) " @ uadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe65000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe65fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe65000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe65fffdf) "") /* Unallocated space */
+ TEST_RR( "usub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "usub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe65cfffa) " @ usub8 pc, r12, r10")
+
+ TEST_RR( "uqadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cff1a) " @ uqadd16 pc, r12, r10")
+ TEST_RR( "uqasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cff3a) " @ uqasx pc, r12, r10")
+ TEST_RR( "uqsax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqsax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cff5a) " @ uqsax pc, r12, r10")
+ TEST_RR( "uqsub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqsub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cff7a) " @ uqsub16 pc, r12, r10")
+ TEST_RR( "uqadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cff9a) " @ uqadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe66000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe66fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe66000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe66fffdf) "") /* Unallocated space */
+ TEST_RR( "uqsub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uqsub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe66cfffa) " @ uqsub8 pc, r12, r10")
+
+ TEST_RR( "uhadd16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhadd16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cff1a) " @ uhadd16 pc, r12, r10")
+ TEST_RR( "uhasx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhasx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cff3a) " @ uhasx pc, r12, r10")
+ TEST_RR( "uhsax r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhsax r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cff5a) " @ uhsax pc, r12, r10")
+ TEST_RR( "uhsub16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhsub16 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cff7a) " @ uhsub16 pc, r12, r10")
+ TEST_RR( "uhadd8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhadd8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cff9a) " @ uhadd8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe67000b0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe67fffbf) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe67000d0) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe67fffdf) "") /* Unallocated space */
+ TEST_RR( "uhsub8 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uhsub8 r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cfffa) " @ uhsub8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe67feffa) " @ uhsub8 r14, pc, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe67cefff) " @ uhsub8 r14, r12, pc")
+#endif /* __LINUX_ARM_ARCH__ >= 7 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_GROUP("Packing, unpacking, saturation, and reversal")
+
+ TEST_RR( "pkhbt r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
+ TEST_UNSUPPORTED(__inst_arm(0xe68cf11a) " @ pkhbt pc, r12, r10, lsl #2")
+ TEST_RR( "pkhtb r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")
+ TEST_UNSUPPORTED(__inst_arm(0xe68cf15a) " @ pkhtb pc, r12, r10, asr #2")
+ TEST_UNSUPPORTED(__inst_arm(0xe68fe15a) " @ pkhtb r14, pc, r10, asr #2")
+ TEST_UNSUPPORTED(__inst_arm(0xe68ce15f) " @ pkhtb r14, r12, pc, asr #2")
+ TEST_UNSUPPORTED(__inst_arm(0xe6900010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe69fffdf) "") /* Unallocated space */
+
+ TEST_R( "ssat r0, #24, r",0, VAL1,"")
+ TEST_R( "ssat r14, #24, r",12, VAL2,"")
+ TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
+ TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
+ TEST_UNSUPPORTED(__inst_arm(0xe6b7f01c) " @ ssat pc, #24, r12")
+
+ TEST_R( "usat r0, #24, r",0, VAL1,"")
+ TEST_R( "usat r14, #24, r",12, VAL2,"")
+ TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
+ TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
+ TEST_UNSUPPORTED(__inst_arm(0xe6f7f01c) " @ usat pc, #24, r12")
+
+ TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxtb16 r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe68cf47a) " @ sxtab16 pc,r12, r10, ror #8")
+
+ TEST_RR( "sel r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "sel r14, r",12,VAL1,", r",10, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe68cffba) " @ sel pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe68fefba) " @ sel r14, pc, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe68cefbf) " @ sel r14, r12, pc")
+
+ TEST_R( "ssat16 r0, #12, r",0, HH1,"")
+ TEST_R( "ssat16 r14, #12, r",12, HH2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6abff3c) " @ ssat16 pc, #12, r12")
+
+ TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxtb r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6acf47a) " @ sxtab pc,r12, r10, ror #8")
+
+ TEST_R( "rev r0, r",0, VAL1,"")
+ TEST_R( "rev r14, r",12, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6bfff3c) " @ rev pc, r12")
+
+ TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxth r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6bcf47a) " @ sxtah pc,r12, r10, ror #8")
+
+ TEST_R( "rev16 r0, r",0, VAL1,"")
+ TEST_R( "rev16 r14, r",12, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6bfffbc) " @ rev16 pc, r12")
+
+ TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxtb16 r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ccf47a) " @ uxtab16 pc,r12, r10, ror #8")
+
+ TEST_R( "usat16 r0, #12, r",0, HH1,"")
+ TEST_R( "usat16 r14, #12, r",12, HH2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ecff3c) " @ usat16 pc, #12, r12")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ecef3f) " @ usat16 r14, #12, pc")
+
+ TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxtb r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ecf47a) " @ uxtab pc,r12, r10, ror #8")
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_R( "rbit r0, r",0, VAL1,"")
+ TEST_R( "rbit r14, r",12, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) " @ rbit pc, r12")
+#endif
+
+ TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxth r8, r",7, HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6fff077) " @ uxth pc, r7")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ff807f) " @ uxth r8, pc")
+ TEST_UNSUPPORTED(__inst_arm(0xe6fcf47a) " @ uxtah pc, r12, r10, ror #8")
+ TEST_UNSUPPORTED(__inst_arm(0xe6fce47f) " @ uxtah r14, r12, pc, ror #8")
+
+ TEST_R( "revsh r0, r",0, VAL1,"")
+ TEST_R( "revsh r14, r",12, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ffff3c) " @ revsh pc, r12")
+ TEST_UNSUPPORTED(__inst_arm(0xe6ffef3f) " @ revsh r14, pc")
+
+ TEST_UNSUPPORTED(__inst_arm(0xe6900070) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe69fff7f) "") /* Unallocated space */
+
+ TEST_UNSUPPORTED(__inst_arm(0xe6d00070) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_arm(0xe6dfff7f) "") /* Unallocated space */
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_GROUP("Signed multiplies")
+
+ TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70f8a1c) " @ smlad pc, r12, r10, r8")
+ TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70f8a3c) " @ smladx pc, r12, r10, r8")
+
+ TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70ffa1c) " @ smuad pc, r12, r10")
+ TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70ffa3c) " @ smuadx pc, r12, r10")
+
+ TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70f8a5c) " @ smlsd pc, r12, r10, r8")
+ TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70f8a7c) " @ smlsdx pc, r12, r10, r8")
+
+ TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70ffa5c) " @ smusd pc, r12, r10")
+ TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe70ffa7c) " @ smusdx pc, r12, r10")
+
+ TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+ TEST_UNSUPPORTED(__inst_arm(0xe74af819) " @ smlald pc, r10, r9, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe74fb819) " @ smlald r11, pc, r9, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe74ab81f) " @ smlald r11, r10, pc, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe74abf19) " @ smlald r11, r10, r9, pc")
+
+ TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+ TEST_UNSUPPORTED(__inst_arm(0xe74af839) " @ smlaldx pc, r10, r9, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe74fb839) " @ smlaldx r11, pc, r9, r8")
+
+ TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75f8a1c) " @ smmla pc, r12, r10, r8")
+ TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75f8a3c) " @ smmlar pc, r12, r10, r8")
+
+ TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) " @ smmul pc, r12, r10")
+ TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75ffa3c) " @ smmulr pc, r12, r10")
+
+ TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75f8adc) " @ smmls pc, r12, r10, r8")
+ TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75f8afc) " @ smmlsr pc, r12, r10, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe75e8aff) " @ smmlsr r14, pc, r10, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe75e8ffc) " @ smmlsr r14, r12, pc, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe75efafc) " @ smmlsr r14, r12, r10, pc")
+
+ TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe75ffa1c) " @ usad8 pc, r12, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe75efa1f) " @ usad8 r14, pc, r10")
+ TEST_UNSUPPORTED(__inst_arm(0xe75eff1c) " @ usad8 r14, r12, pc")
+
+ TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"")
+ TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
+ TEST_UNSUPPORTED(__inst_arm(0xe78f8a1c) " @ usada8 pc, r12, r10, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe78e8a1f) " @ usada8 r14, pc, r10, r8")
+ TEST_UNSUPPORTED(__inst_arm(0xe78e8f1c) " @ usada8 r14, r12, pc, r8")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_GROUP("Bit Field")
+
+ TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
+ TEST_R( "sbfxeq r14, r",12, VAL2,", #8, #16")
+ TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
+ TEST_UNSUPPORTED(__inst_arm(0xe7aff45c) " @ sbfx pc, r12, #8, #16")
+
+ TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
+ TEST_R( "ubfxcs r14, r",12, VAL2,", #8, #16")
+ TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
+ TEST_UNSUPPORTED(__inst_arm(0xe7eff45c) " @ ubfx pc, r12, #8, #16")
+ TEST_UNSUPPORTED(__inst_arm(0xe7efc45f) " @ ubfx r12, pc, #8, #16")
+
+ TEST_R( "bfc r",0, VAL1,", #4, #20")
+ TEST_R( "bfcvs r",14,VAL2,", #4, #20")
+ TEST_R( "bfc r",7, VAL1,", #0, #31")
+ TEST_R( "bfc r",8, VAL2,", #0, #31")
+ TEST_UNSUPPORTED(__inst_arm(0xe7def01f) " @ bfc pc, #0, #31")
+
+ TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
+ TEST_RR( "bfipl r",12,VAL1,", r",14 , VAL2,", #4, #20")
+ TEST_UNSUPPORTED(__inst_arm(0xe7d7f21e) " @ bfi pc, r14, #4, #20")
+
+ TEST_UNSUPPORTED(__inst_arm(0x07f000f0) "") /* Permanently UNDEFINED */
+ TEST_UNSUPPORTED(__inst_arm(0x07ffffff) "") /* Permanently UNDEFINED */
+#endif /* __LINUX_ARM_ARCH__ >= 7 */
+
+ TEST_GROUP("Branch, branch with link, and block data transfer")
+
+ TEST_P( "stmda r",0, 16*4,", {r0}")
+ TEST_P( "stmeqda r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmneda r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_P( "stmda r",13,0, "!, {pc}")
+
+ TEST_P( "ldmda r",0, 16*4,", {r0}")
+ TEST_BF_P("ldmcsda r",4, 15*4,", {r0-r15}")
+ TEST_BF_P("ldmccda r",7, 15*4,"!, {r8-r15}")
+ TEST_P( "ldmda r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmda r",14,15*4,"!, {pc}")
+
+ TEST_P( "stmia r",0, 16*4,", {r0}")
+ TEST_P( "stmmiia r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmplia r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_P( "stmia r",14,0, "!, {pc}")
+
+ TEST_P( "ldmia r",0, 16*4,", {r0}")
+ TEST_BF_P("ldmvsia r",4, 0, ", {r0-r15}")
+ TEST_BF_P("ldmvcia r",7, 8*4, "!, {r8-r15}")
+ TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmia r",14,15*4,"!, {pc}")
+
+ TEST_P( "stmdb r",0, 16*4,", {r0}")
+ TEST_P( "stmhidb r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmlsdb r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_P( "stmdb r",13,4, "!, {pc}")
+
+ TEST_P( "ldmdb r",0, 16*4,", {r0}")
+ TEST_BF_P("ldmgedb r",4, 16*4,", {r0-r15}")
+ TEST_BF_P("ldmltdb r",7, 16*4,"!, {r8-r15}")
+ TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmdb r",14,16*4,"!, {pc}")
+
+ TEST_P( "stmib r",0, 16*4,", {r0}")
+ TEST_P( "stmgtib r",4, 16*4,", {r0-r15}")
+ TEST_P( "stmleib r",8, 16*4,"!, {r8-r15}")
+ TEST_P( "stmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_P( "stmib r",13,-4, "!, {pc}")
+
+ TEST_P( "ldmib r",0, 16*4,", {r0}")
+ TEST_BF_P("ldmeqib r",4, -4,", {r0-r15}")
+ TEST_BF_P("ldmneib r",7, 7*4,"!, {r8-r15}")
+ TEST_P( "ldmib r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmib r",14,14*4,"!, {pc}")
+
+ TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
+ TEST_P( "stmeqdb r",13,16*4,"!, {r3-r12}")
+ TEST_P( "stmnedb r",2, 16*4,", {r3-r12,lr}")
+ TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
+ TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
+ TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
+
+ TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
+ TEST_P( "ldmccia r",13,5*4, "!, {r3-r12}")
+ TEST_BF_P("ldmcsia r",2, 5*4, "!, {r3-r12,pc}")
+ TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
+ TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
+ TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
+
+#ifdef CONFIG_THUMB2_KERNEL
+ TEST_ARM_TO_THUMB_INTERWORK_P("ldmplia r",0,15*4,", {pc}")
+ TEST_ARM_TO_THUMB_INTERWORK_P("ldmmiia r",13,0,", {r0-r15}")
+#endif
+ TEST_BF("b 2f")
+ TEST_BF("bl 2f")
+ TEST_BB("b 2b")
+ TEST_BB("bl 2b")
+
+ TEST_BF("beq 2f")
+ TEST_BF("bleq 2f")
+ TEST_BB("bne 2b")
+ TEST_BB("blne 2b")
+
+ TEST_BF("bgt 2f")
+ TEST_BF("blgt 2f")
+ TEST_BB("blt 2b")
+ TEST_BB("bllt 2b")
+
+ TEST_GROUP("Supervisor Call, and coprocessor instructions")
+
+ /*
+ * We can't really test these by executing them, so all
+ * we can do is check that probes are, or are not, allowed.
+ * At the moment none are allowed...
+ */
+#define TEST_COPROCESSOR(code) TEST_UNSUPPORTED(code)
+
+#define COPROCESSOR_INSTRUCTIONS_ST_LD(two,cc) \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("stc"two" 0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("stc"two"l 0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("ldc"two" 0, cr0, [r13], {1}") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #4]!") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13, #-4]!") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #4") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], #-4") \
+ TEST_COPROCESSOR("ldc"two"l 0, cr0, [r13], {1}") \
+ \
+ TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "stc"two" 0, cr0, [r15, #-4]") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##daf0001) " @ stc"two" 0, cr0, [r15, #4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##d2f0001) " @ stc"two" 0, cr0, [r15, #-4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##caf0001) " @ stc"two" 0, cr0, [r15], #4") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c2f0001) " @ stc"two" 0, cr0, [r15], #-4") \
+ TEST_COPROCESSOR( "stc"two" 0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15, #-4]") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##def0001) " @ stc"two"l 0, cr0, [r15, #4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##d6f0001) " @ stc"two"l 0, cr0, [r15, #-4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##cef0001) " @ stc"two"l 0, cr0, [r15], #4") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c6f0001) " @ stc"two"l 0, cr0, [r15], #-4") \
+ TEST_COPROCESSOR( "stc"two"l 0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15, #-4]") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##dbf0001) " @ ldc"two" 0, cr0, [r15, #4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##d3f0001) " @ ldc"two" 0, cr0, [r15, #-4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##cbf0001) " @ ldc"two" 0, cr0, [r15], #4") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c3f0001) " @ ldc"two" 0, cr0, [r15], #-4") \
+ TEST_COPROCESSOR( "ldc"two" 0, cr0, [r15], {1}") \
+ TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #4]") \
+ TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15, #-4]") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##dff0001) " @ ldc"two"l 0, cr0, [r15, #4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##d7f0001) " @ ldc"two"l 0, cr0, [r15, #-4]!") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##cff0001) " @ ldc"two"l 0, cr0, [r15], #4") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c7f0001) " @ ldc"two"l 0, cr0, [r15], #-4") \
+ TEST_COPROCESSOR( "ldc"two"l 0, cr0, [r15], {1}")
+
+#define COPROCESSOR_INSTRUCTIONS_MC_MR(two,cc) \
+ \
+ TEST_COPROCESSOR( "mcrr"two" 0, 15, r0, r14, cr0") \
+ TEST_COPROCESSOR( "mcrr"two" 15, 0, r14, r0, cr15") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c4f00f0) " @ mcrr"two" 0, 15, r0, r15, cr0") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c40ff0f) " @ mcrr"two" 15, 0, r15, r0, cr15") \
+ TEST_COPROCESSOR( "mrrc"two" 0, 15, r0, r14, cr0") \
+ TEST_COPROCESSOR( "mrrc"two" 15, 0, r14, r0, cr15") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c5f00f0) " @ mrrc"two" 0, 15, r0, r15, cr0") \
+ TEST_UNSUPPORTED(__inst_arm(0x##cc##c50ff0f) " @ mrrc"two" 15, 0, r15, r0, cr15") \
+ TEST_COPROCESSOR( "cdp"two" 15, 15, cr15, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "cdp"two" 0, 0, cr0, cr0, cr0, 0") \
+ TEST_COPROCESSOR( "mcr"two" 15, 7, r15, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "mcr"two" 0, 0, r0, cr0, cr0, 0") \
+ TEST_COPROCESSOR( "mrc"two" 15, 7, r15, cr15, cr15, 7") \
+ TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
+
+ COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+#if __LINUX_ARM_ARCH__ >= 5
+ COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
+#endif
+ TEST_UNSUPPORTED("svc 0")
+ TEST_UNSUPPORTED("svc 0xffffff")
+
+ TEST_GROUP("Unconditional instruction")
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_UNSUPPORTED("srsda sp, 0x13")
+ TEST_UNSUPPORTED("srsdb sp, 0x13")
+ TEST_UNSUPPORTED("srsia sp, 0x13")
+ TEST_UNSUPPORTED("srsib sp, 0x13")
+ TEST_UNSUPPORTED("srsda sp!, 0x13")
+ TEST_UNSUPPORTED("srsdb sp!, 0x13")
+ TEST_UNSUPPORTED("srsia sp!, 0x13")
+ TEST_UNSUPPORTED("srsib sp!, 0x13")
+
+ TEST_UNSUPPORTED("rfeda sp")
+ TEST_UNSUPPORTED("rfedb sp")
+ TEST_UNSUPPORTED("rfeia sp")
+ TEST_UNSUPPORTED("rfeib sp")
+ TEST_UNSUPPORTED("rfeda sp!")
+ TEST_UNSUPPORTED("rfedb sp!")
+ TEST_UNSUPPORTED("rfeia sp!")
+ TEST_UNSUPPORTED("rfeib sp!")
+ TEST_UNSUPPORTED(__inst_arm(0xf81d0a00) " @ rfeda pc")
+ TEST_UNSUPPORTED(__inst_arm(0xf91d0a00) " @ rfedb pc")
+ TEST_UNSUPPORTED(__inst_arm(0xf89d0a00) " @ rfeia pc")
+ TEST_UNSUPPORTED(__inst_arm(0xf99d0a00) " @ rfeib pc")
+ TEST_UNSUPPORTED(__inst_arm(0xf83d0a00) " @ rfeda pc!")
+ TEST_UNSUPPORTED(__inst_arm(0xf93d0a00) " @ rfedb pc!")
+ TEST_UNSUPPORTED(__inst_arm(0xf8bd0a00) " @ rfeia pc!")
+ TEST_UNSUPPORTED(__inst_arm(0xf9bd0a00) " @ rfeib pc!")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_X( "blx __dummy_thumb_subroutine_even",
+ ".thumb \n\t"
+ ".space 4 \n\t"
+ ".type __dummy_thumb_subroutine_even, %%function \n\t"
+ "__dummy_thumb_subroutine_even: \n\t"
+ "mov r0, pc \n\t"
+ "bx lr \n\t"
+ ".arm \n\t"
+ )
+ TEST( "blx __dummy_thumb_subroutine_even")
+
+ TEST_X( "blx __dummy_thumb_subroutine_odd",
+ ".thumb \n\t"
+ ".space 2 \n\t"
+ ".type __dummy_thumb_subroutine_odd, %%function \n\t"
+ "__dummy_thumb_subroutine_odd: \n\t"
+ "mov r0, pc \n\t"
+ "bx lr \n\t"
+ ".arm \n\t"
+ )
+ TEST( "blx __dummy_thumb_subroutine_odd")
+#endif /* __LINUX_ARM_ARCH__ >= 6 */
+
+#if __LINUX_ARM_ARCH__ >= 5
+ COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
+#endif
+#if __LINUX_ARM_ARCH__ >= 6
+ COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
+#endif
+
+ TEST_GROUP("Miscellaneous instructions, memory hints, and Advanced SIMD instructions")
+
+#if __LINUX_ARM_ARCH__ >= 6
+ TEST_UNSUPPORTED("cps 0x13")
+ TEST_UNSUPPORTED("cpsie i")
+ TEST_UNSUPPORTED("cpsid i")
+ TEST_UNSUPPORTED("cpsie i,0x13")
+ TEST_UNSUPPORTED("cpsid i,0x13")
+ TEST_UNSUPPORTED("setend le")
+ TEST_UNSUPPORTED("setend be")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_P("pli [r",0,0b,", #16]")
+ TEST( "pli [pc, #0]")
+ TEST_RR("pli [r",12,0b,", r",0, 16,"]")
+ TEST_RR("pli [r",0, 0b,", -r",12,16,", lsl #4]")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 5
+ TEST_P("pld [r",0,32,", #-16]")
+ TEST( "pld [pc, #0]")
+ TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
+ TEST_PR("pld [r",8, 24, ", -r",12,16,", lsl #4]")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_SUPPORTED( __inst_arm(0xf590f000) " @ pldw [r0, #0]")
+ TEST_SUPPORTED( __inst_arm(0xf797f000) " @ pldw [r7, r0]")
+ TEST_SUPPORTED( __inst_arm(0xf798f18c) " @ pldw [r8, r12, lsl #3]")
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+ TEST_UNSUPPORTED("clrex")
+ TEST_UNSUPPORTED("dsb")
+ TEST_UNSUPPORTED("dmb")
+ TEST_UNSUPPORTED("isb")
+#endif
+
+ verbose("\n");
+}
+
diff --git a/arch/arm/probes/kprobes/test-core.c b/arch/arm/probes/kprobes/test-core.c
new file mode 100644
index 000000000..cc237fa9b
--- /dev/null
+++ b/arch/arm/probes/kprobes/test-core.c
@@ -0,0 +1,1676 @@
+/*
+ * arch/arm/kernel/kprobes-test.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This file contains test code for ARM kprobes.
+ *
+ * The top level function run_all_tests() executes tests for all of the
+ * supported instruction sets: ARM, 16-bit Thumb, and 32-bit Thumb. These tests
+ * fall into two categories: run_api_tests() checks basic functionality of the
+ * kprobes API, and run_test_cases() is a comprehensive test for kprobes
+ * instruction decoding and simulation.
+ *
+ * run_test_cases() first checks the kprobes decoding table for self-consistency
+ * (using table_test()) then executes a series of test cases for each of the CPU
+ * instruction forms. coverage_start() and coverage_end() are used to verify
+ * that these test cases cover all of the possible combinations of instructions
+ * described by the kprobes decoding tables.
+ *
+ * The individual test cases are in kprobes-test-arm.c and kprobes-test-thumb.c
+ * which use the macros defined in kprobes-test.h. The rest of this
+ * documentation will describe the operation of the framework used by these
+ * test cases.
+ */
+
+/*
+ * TESTING METHODOLOGY
+ * -------------------
+ *
+ * The methodology used to test an ARM instruction 'test_insn' is to use
+ * inline assembler like:
+ *
+ * test_before: nop
+ * test_case: test_insn
+ * test_after: nop
+ *
+ * When the test case is run a kprobe is placed on each nop. The
+ * post-handler of the test_before probe is used to modify the saved CPU
+ * register context to that which we require for the test case. The
+ * pre-handler of the test_after probe saves a copy of the CPU
+ * register context. In this way we can execute test_insn with a specific
+ * register context and see the results afterwards.
+ *
+ * To actually test the kprobes instruction emulation we perform the above
+ * step a second time but with an additional kprobe on the test_case
+ * instruction itself. If the emulation is accurate then the results seen
+ * by the test_after probe will be identical to the first run which didn't
+ * have a probe on test_case.
+ *
+ * Each test case is run several times with a variety of variations in the
+ * flags value stored in the CPSR and, for Thumb code, different ITState.
+ *
+ * For instructions which can modify PC, a second test_after probe is used
+ * like this:
+ *
+ * test_before: nop
+ * test_case: test_insn
+ * test_after: nop
+ * b test_done
+ * test_after2: nop
+ * test_done:
+ *
+ * The test case is constructed such that test_insn branches to
+ * test_after2, or, if testing a conditional instruction, it may just
+ * continue to test_after. The probes inserted at both locations let us
+ * determine which happened. A similar approach is used for testing
+ * backwards branches...
+ *
+ * b test_before
+ * b test_done @ helps to cope with off by 1 branches
+ * test_after2: nop
+ * b test_done
+ * test_before: nop
+ * test_case: test_insn
+ * test_after: nop
+ * test_done:
+ *
+ * The macros used to generate the assembler instructions described above
+ * are TEST_INSTRUCTION, TEST_BRANCH_F (branch forwards) and TEST_BRANCH_B
+ * (branch backwards). In these, the local labels numbered 50, 1, 2 and
+ * 99 represent: test_before, test_case, test_after2 and test_done.
+ *
+ * FRAMEWORK
+ * ---------
+ *
+ * Each test case is wrapped between the pair of macros TESTCASE_START and
+ * TESTCASE_END. As well as performing the inline assembler boilerplate,
+ * these call out to the kprobes_test_case_start() and
+ * kprobes_test_case_end() functions which drive the execution of the test
+ * case. The specific arguments to use for each test case are stored as
+ * inline data constructed using the various TEST_ARG_* macros. Putting
+ * this all together, a simple test case may look like:
+ *
+ * TESTCASE_START("Testing mov r0, r7")
+ * TEST_ARG_REG(7, 0x12345678) // Set r7=0x12345678
+ * TEST_ARG_END("")
+ * TEST_INSTRUCTION("mov r0, r7")
+ * TESTCASE_END
+ *
+ * Note, in practice the single convenience macro TEST_R would be used for this
+ * instead.
+ *
+ * The above would expand to assembler looking something like:
+ *
+ * @ TESTCASE_START
+ * bl __kprobes_test_case_start
+ * .pushsection .rodata
+ * 10:
+ * .ascii "mov r0, r7" @ text title for test case
+ * .byte 0
+ * .popsection
+ * @ start of inline data...
+ * .word 10b @ pointer to title in .rodata section
+ *
+ * @ TEST_ARG_REG
+ * .byte ARG_TYPE_REG
+ * .byte 7
+ * .short 0
+ * .word 0x12345678
+ *
+ * @ TEST_ARG_END
+ * .byte ARG_TYPE_END
+ * .byte TEST_ISA @ flags, including ISA being tested
+ * .short 50f-0f @ offset of 'test_before'
+ * .short 2f-0f @ offset of 'test_after2' (if relevant)
+ * .short 99f-0f @ offset of 'test_done'
+ * @ start of test case code...
+ * 0:
+ * .code TEST_ISA @ switch to ISA being tested
+ *
+ * @ TEST_INSTRUCTION
+ * 50: nop @ location for 'test_before' probe
+ * 1: mov r0, r7 @ the test case instruction 'test_insn'
+ * nop @ location for 'test_after' probe
+ *
+ * @ TESTCASE_END
+ * 2:
+ * 99: bl __kprobes_test_case_end_##TEST_ISA
+ * .code NORMAL_ISA
+ *
+ * When the above is executed, the following happens...
+ *
+ * __kprobes_test_case_start() is an assembler wrapper which sets up space
+ * for a stack buffer and calls the C function kprobes_test_case_start().
+ * This C function will do some initial processing of the inline data and
+ * setup some global state. It then inserts the test_before and test_after
+ * kprobes and returns a value which causes the assembler wrapper to jump
+ * to the start of the test case code, (local label '0').
+ *
+ * When the test case code executes, the test_before probe will be hit and
+ * test_before_post_handler will call setup_test_context(). This fills the
+ * stack buffer and CPU registers with a test pattern and then processes
+ * the test case arguments. In our example there is one TEST_ARG_REG which
+ * indicates that R7 should be loaded with the value 0x12345678.
+ *
+ * When the test_before probe ends, the test case continues and executes
+ * the "mov r0, r7" instruction. It then hits the test_after probe and the
+ * pre-handler for this (test_after_pre_handler) will save a copy of the
+ * CPU register context. This should now have R0 holding the same value as
+ * R7.
+ *
+ * Finally we get to the call to __kprobes_test_case_end_{32,16}. This is
+ * an assembler wrapper which switches back to the ISA used by the test
+ * code and calls the C function kprobes_test_case_end().
+ *
+ * For each run through the test case, test_case_run_count is incremented
+ * by one. For even runs, kprobes_test_case_end() saves a copy of the
+ * register and stack buffer contents from the test case just run. It then
+ * inserts a kprobe on the test case instruction 'test_insn' and returns a
+ * value to cause the test case code to be re-run.
+ *
+ * For odd numbered runs, kprobes_test_case_end() compares the register and
+ * stack buffer contents to those that were saved on the previous even
+ * numbered run (the one without the kprobe on test_insn). These should be
+ * the same if the kprobe instruction simulation routine is correct.
+ *
+ * The pair of test case runs is repeated with different combinations of
+ * flag values in CPSR and, for Thumb, different ITState. This is
+ * controlled by test_context_cpsr().
+ *
+ * BUILDING TEST CASES
+ * -------------------
+ *
+ * As an aid to building test cases, the stack buffer is initialised with
+ * some special values:
+ *
+ * [SP+13*4] Contains SP+120. This can be used to test instructions
+ * which load a value into SP.
+ *
+ * [SP+15*4] When testing branching instructions using TEST_BRANCH_{F,B},
+ * this holds the target address of the branch, 'test_after2'.
+ * This can be used to test instructions which load a PC value
+ * from memory.
+ */
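+
+/*
+ * As a concrete illustration (this case appears in test-arm.c):
+ *
+ *     TEST_BF("ldr pc, [sp, #15*4]")
+ *
+ * relies on the [SP+15*4] value described above: the PC value loaded is
+ * the address of 'test_after2', so the branch is observed by the second
+ * test_after probe.
+ */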
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/sched/clock.h>
+#include <linux/kprobes.h>
+#include <linux/errno.h>
+#include <linux/stddef.h>
+#include <linux/bug.h>
+#include <asm/opcodes.h>
+
+#include "core.h"
+#include "test-core.h"
+#include "../decode-arm.h"
+#include "../decode-thumb.h"
+
+
+#define BENCHMARKING 1
+
+
+/*
+ * Test basic API
+ */
+
+static bool test_regs_ok;
+static int test_func_instance;
+static int pre_handler_called;
+static int post_handler_called;
+static int kretprobe_handler_called;
+static int tests_failed;
+
+#define FUNC_ARG1 0x12345678
+#define FUNC_ARG2 0xabcdef
+
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+#define RET(reg) "mov pc, "#reg
+
+long arm_func(long r0, long r1);
+
+static void __used __naked __arm_kprobes_test_func(void)
+{
+ __asm__ __volatile__ (
+ ".arm \n\t"
+ ".type arm_func, %%function \n\t"
+ "arm_func: \n\t"
+ "adds r0, r0, r1 \n\t"
+ "mov pc, lr \n\t"
+ ".code "NORMAL_ISA /* Back to Thumb if necessary */
+ : : : "r0", "r1", "cc"
+ );
+}
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+#define RET(reg) "bx "#reg
+
+long thumb16_func(long r0, long r1);
+long thumb32even_func(long r0, long r1);
+long thumb32odd_func(long r0, long r1);
+
+static void __used __naked __thumb_kprobes_test_funcs(void)
+{
+ __asm__ __volatile__ (
+ ".type thumb16_func, %%function \n\t"
+ "thumb16_func: \n\t"
+ "adds.n r0, r0, r1 \n\t"
+ "bx lr \n\t"
+
+ ".align \n\t"
+ ".type thumb32even_func, %%function \n\t"
+ "thumb32even_func: \n\t"
+ "adds.w r0, r0, r1 \n\t"
+ "bx lr \n\t"
+
+ ".align \n\t"
+ "nop.n \n\t"
+ ".type thumb32odd_func, %%function \n\t"
+ "thumb32odd_func: \n\t"
+ "adds.w r0, r0, r1 \n\t"
+ "bx lr \n\t"
+
+ : : : "r0", "r1", "cc"
+ );
+}
+
+#endif /* CONFIG_THUMB2_KERNEL */
+
+
+static int call_test_func(long (*func)(long, long), bool check_test_regs)
+{
+ long ret;
+
+ ++test_func_instance;
+ test_regs_ok = false;
+
+ ret = (*func)(FUNC_ARG1, FUNC_ARG2);
+ if (ret != FUNC_ARG1 + FUNC_ARG2) {
+ pr_err("FAIL: call_test_func: func returned %lx\n", ret);
+ return false;
+ }
+
+ if (check_test_regs && !test_regs_ok) {
+ pr_err("FAIL: test regs not OK\n");
+ return false;
+ }
+
+ return true;
+}
+
+static int __kprobes pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ pre_handler_called = test_func_instance;
+ if (regs->ARM_r0 == FUNC_ARG1 && regs->ARM_r1 == FUNC_ARG2)
+ test_regs_ok = true;
+ return 0;
+}
+
+static void __kprobes post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ post_handler_called = test_func_instance;
+ if (regs->ARM_r0 != FUNC_ARG1 + FUNC_ARG2 || regs->ARM_r1 != FUNC_ARG2)
+ test_regs_ok = false;
+}
+
+static struct kprobe the_kprobe = {
+ .addr = 0,
+ .pre_handler = pre_handler,
+ .post_handler = post_handler
+};
+
+static int test_kprobe(long (*func)(long, long))
+{
+ int ret;
+
+ the_kprobe.addr = (kprobe_opcode_t *)func;
+ ret = register_kprobe(&the_kprobe);
+ if (ret < 0) {
+ pr_err("FAIL: register_kprobe failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = call_test_func(func, true);
+
+ unregister_kprobe(&the_kprobe);
+ the_kprobe.flags = 0; /* Clear disable flag to allow reuse */
+
+ if (!ret)
+ return -EINVAL;
+ if (pre_handler_called != test_func_instance) {
+ pr_err("FAIL: kprobe pre_handler not called\n");
+ return -EINVAL;
+ }
+ if (post_handler_called != test_func_instance) {
+ pr_err("FAIL: kprobe post_handler not called\n");
+ return -EINVAL;
+ }
+ if (!call_test_func(func, false))
+ return -EINVAL;
+ if (pre_handler_called == test_func_instance ||
+ post_handler_called == test_func_instance) {
+ pr_err("FAIL: probe called after unregistering\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __kprobes
+kretprobe_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ kretprobe_handler_called = test_func_instance;
+ if (regs_return_value(regs) == FUNC_ARG1 + FUNC_ARG2)
+ test_regs_ok = true;
+ return 0;
+}
+
+static struct kretprobe the_kretprobe = {
+ .handler = kretprobe_handler,
+};
+
+static int test_kretprobe(long (*func)(long, long))
+{
+ int ret;
+
+ the_kretprobe.kp.addr = (kprobe_opcode_t *)func;
+ ret = register_kretprobe(&the_kretprobe);
+ if (ret < 0) {
+ pr_err("FAIL: register_kretprobe failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = call_test_func(func, true);
+
+ unregister_kretprobe(&the_kretprobe);
+ the_kretprobe.kp.flags = 0; /* Clear disable flag to allow reuse */
+
+ if (!ret)
+ return -EINVAL;
+ if (kretprobe_handler_called != test_func_instance) {
+ pr_err("FAIL: kretprobe handler not called\n");
+ return -EINVAL;
+ }
+ if (!call_test_func(func, false))
+ return -EINVAL;
+ if (kretprobe_handler_called == test_func_instance) {
+ pr_err("FAIL: kretprobe called after unregistering\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int run_api_tests(long (*func)(long, long))
+{
+ int ret;
+
+ pr_info(" kprobe\n");
+ ret = test_kprobe(func);
+ if (ret < 0)
+ return ret;
+
+ pr_info(" kretprobe\n");
+ ret = test_kretprobe(func);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+
+/*
+ * Benchmarking
+ */
+
+#if BENCHMARKING
+
+static void __naked benchmark_nop(void)
+{
+ __asm__ __volatile__ (
+ "nop \n\t"
+ RET(lr)" \n\t"
+ );
+}
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define wide ".w"
+#else
+#define wide
+#endif
+
+static void __naked benchmark_pushpop1(void)
+{
+ __asm__ __volatile__ (
+ "stmdb"wide" sp!, {r3-r11,lr} \n\t"
+ "ldmia"wide" sp!, {r3-r11,pc}"
+ );
+}
+
+static void __naked benchmark_pushpop2(void)
+{
+ __asm__ __volatile__ (
+ "stmdb"wide" sp!, {r0-r8,lr} \n\t"
+ "ldmia"wide" sp!, {r0-r8,pc}"
+ );
+}
+
+static void __naked benchmark_pushpop3(void)
+{
+ __asm__ __volatile__ (
+ "stmdb"wide" sp!, {r4,lr} \n\t"
+ "ldmia"wide" sp!, {r4,pc}"
+ );
+}
+
+static void __naked benchmark_pushpop4(void)
+{
+ __asm__ __volatile__ (
+ "stmdb"wide" sp!, {r0,lr} \n\t"
+ "ldmia"wide" sp!, {r0,pc}"
+ );
+}
+
+
+#ifdef CONFIG_THUMB2_KERNEL
+
+static void __naked benchmark_pushpop_thumb(void)
+{
+ __asm__ __volatile__ (
+ "push.n {r0-r7,lr} \n\t"
+ "pop.n {r0-r7,pc}"
+ );
+}
+
+#endif
+
+static int __kprobes
+benchmark_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ return 0;
+}
+
+static int benchmark(void(*fn)(void))
+{
+ unsigned n, i, t, t0;
+
+ for (n = 1000; ; n *= 2) {
+ t0 = sched_clock();
+ for (i = n; i > 0; --i)
+ fn();
+ t = sched_clock() - t0;
+ if (t >= 250000000)
+ break; /* Stop once we took more than 0.25 seconds */
+ }
+ return t / n; /* Time for one iteration in nanoseconds */
+}
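+
+/*
+ * Illustrative use (variable names hypothetical): the per-hit cost of a
+ * kprobe is the difference between a probed and an unprobed run:
+ *
+ *     baseline = benchmark(benchmark_nop);
+ *     probed   = kprobe_benchmark(benchmark_nop, 0);
+ *     overhead = probed - baseline;
+ *
+ * kprobe_benchmark() below registers a probe at the given offset into
+ * the function before timing it.
+ */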
+
+static int kprobe_benchmark(void(*fn)(void), unsigned offset)
+{
+ struct kprobe k = {
+ .addr = (kprobe_opcode_t *)((uintptr_t)fn + offset),
+ .pre_handler = benchmark_pre_handler,
+ };
+
+ int ret = register_kprobe(&k);
+ if (ret < 0) {
+ pr_err("FAIL: register_kprobe failed with %d\n", ret);
+ return ret;
+ }
+
+ ret = benchmark(fn);
+
+ unregister_kprobe(&k);
+ return ret;
+}
+
+struct benchmarks {
+ void (*fn)(void);
+ unsigned offset;
+ const char *title;
+};
+
+static int run_benchmarks(void)
+{
+ int ret;
+ struct benchmarks list[] = {
+ {&benchmark_nop, 0, "nop"},
+ /*
+ * benchmark_pushpop{1,3} will have the optimised
+ * instruction emulation, whilst benchmark_pushpop{2,4} will
+ * be the equivalent unoptimised instructions.
+ */
+ {&benchmark_pushpop1, 0, "stmdb sp!, {r3-r11,lr}"},
+ {&benchmark_pushpop1, 4, "ldmia sp!, {r3-r11,pc}"},
+ {&benchmark_pushpop2, 0, "stmdb sp!, {r0-r8,lr}"},
+ {&benchmark_pushpop2, 4, "ldmia sp!, {r0-r8,pc}"},
+ {&benchmark_pushpop3, 0, "stmdb sp!, {r4,lr}"},
+ {&benchmark_pushpop3, 4, "ldmia sp!, {r4,pc}"},
+ {&benchmark_pushpop4, 0, "stmdb sp!, {r0,lr}"},
+ {&benchmark_pushpop4, 4, "ldmia sp!, {r0,pc}"},
+#ifdef CONFIG_THUMB2_KERNEL
+ {&benchmark_pushpop_thumb, 0, "push.n {r0-r7,lr}"},
+ {&benchmark_pushpop_thumb, 2, "pop.n {r0-r7,pc}"},
+#endif
+ {0}
+ };
+
+ struct benchmarks *b;
+ for (b = list; b->fn; ++b) {
+ ret = kprobe_benchmark(b->fn, b->offset);
+ if (ret < 0)
+ return ret;
+ pr_info(" %dns for kprobe %s\n", ret, b->title);
+ }
+
+ pr_info("\n");
+ return 0;
+}
+
+#endif /* BENCHMARKING */
+
+
+/*
+ * Decoding table self-consistency tests
+ */
+
+static const int decode_struct_sizes[NUM_DECODE_TYPES] = {
+ [DECODE_TYPE_TABLE] = sizeof(struct decode_table),
+ [DECODE_TYPE_CUSTOM] = sizeof(struct decode_custom),
+ [DECODE_TYPE_SIMULATE] = sizeof(struct decode_simulate),
+ [DECODE_TYPE_EMULATE] = sizeof(struct decode_emulate),
+ [DECODE_TYPE_OR] = sizeof(struct decode_or),
+ [DECODE_TYPE_REJECT] = sizeof(struct decode_reject)
+};
+
+static int table_iter(const union decode_item *table,
+ int (*fn)(const struct decode_header *, void *),
+ void *args)
+{
+ const struct decode_header *h = (struct decode_header *)table;
+ int result;
+
+ for (;;) {
+ enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+ if (type == DECODE_TYPE_END)
+ return 0;
+
+ result = fn(h, args);
+ if (result)
+ return result;
+
+ h = (struct decode_header *)
+ ((uintptr_t)h + decode_struct_sizes[type]);
+ }
+}
+
+static int table_test_fail(const struct decode_header *h, const char *message)
+{
+ pr_err("FAIL: kprobes test failure \"%s\" (mask %08x, value %08x)\n",
+ message, h->mask.bits, h->value.bits);
+ return -EINVAL;
+}
+
+struct table_test_args {
+ const union decode_item *root_table;
+ u32 parent_mask;
+ u32 parent_value;
+};
+
+static int table_test_fn(const struct decode_header *h, void *args)
+{
+ struct table_test_args *a = (struct table_test_args *)args;
+ enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+ if (h->value.bits & ~h->mask.bits)
+ return table_test_fail(h, "Match value has bits not in mask");
+
+ if ((h->mask.bits & a->parent_mask) != a->parent_mask)
+ return table_test_fail(h, "Mask has bits not in parent mask");
+
+ if ((h->value.bits ^ a->parent_value) & a->parent_mask)
+ return table_test_fail(h, "Value is inconsistent with parent");
+
+ if (type == DECODE_TYPE_TABLE) {
+ struct decode_table *d = (struct decode_table *)h;
+ struct table_test_args args2 = *a;
+ args2.parent_mask = h->mask.bits;
+ args2.parent_value = h->value.bits;
+ return table_iter(d->table.table, table_test_fn, &args2);
+ }
+
+ return 0;
+}
+
+static int table_test(const union decode_item *table)
+{
+ struct table_test_args args = {
+ .root_table = table,
+ .parent_mask = 0,
+ .parent_value = 0
+ };
+ return table_iter(args.root_table, table_test_fn, &args);
+}
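+
+/*
+ * Illustrative sketch (mask/value numbers hypothetical): a parent table
+ * entry matching {mask 0x0e000000, value 0x0a000000} constrains its
+ * sub-table, so a child entry with {mask 0x0ef00000, value 0x0a500000}
+ * passes the checks above, whereas a child whose value disagreed with
+ * the parent on bits inside the parent mask could never match any
+ * instruction which the parent routed to it.
+ */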
+
+
+/*
+ * Decoding table test coverage analysis
+ *
+ * coverage_start() builds a coverage_table which contains a list of
+ * coverage_entry's to match each entry in the specified kprobes instruction
+ * decoding table.
+ *
+ * When test cases are run, coverage_add() is called to process each case.
+ * This looks up the corresponding entry in the coverage_table and sets it as
+ * being matched, as well as clearing the regs flag appropriate for the test.
+ *
+ * After all test cases have been run, coverage_end() is called to check that
+ * all entries in coverage_table have been matched and that all regs flags are
+ * cleared. I.e. that all possible combinations of instructions described by
+ * the kprobes decoding tables have had a test case executed for them.
+ */
+
+bool coverage_fail;
+
+#define MAX_COVERAGE_ENTRIES 256
+
+struct coverage_entry {
+ const struct decode_header *header;
+ unsigned regs;
+ unsigned nesting;
+ char matched;
+};
+
+struct coverage_table {
+ struct coverage_entry *base;
+ unsigned num_entries;
+ unsigned nesting;
+};
+
+struct coverage_table coverage;
+
+#define COVERAGE_ANY_REG (1<<0)
+#define COVERAGE_SP (1<<1)
+#define COVERAGE_PC (1<<2)
+#define COVERAGE_PCWB (1<<3)
+
+static const char coverage_register_lookup[16] = {
+ [REG_TYPE_ANY] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
+ [REG_TYPE_SAMEAS16] = COVERAGE_ANY_REG,
+ [REG_TYPE_SP] = COVERAGE_SP,
+ [REG_TYPE_PC] = COVERAGE_PC,
+ [REG_TYPE_NOSP] = COVERAGE_ANY_REG | COVERAGE_SP,
+ [REG_TYPE_NOSPPC] = COVERAGE_ANY_REG | COVERAGE_SP | COVERAGE_PC,
+ [REG_TYPE_NOPC] = COVERAGE_ANY_REG | COVERAGE_PC,
+ [REG_TYPE_NOPCWB] = COVERAGE_ANY_REG | COVERAGE_PC | COVERAGE_PCWB,
+ [REG_TYPE_NOPCX] = COVERAGE_ANY_REG,
+ [REG_TYPE_NOSPPCX] = COVERAGE_ANY_REG | COVERAGE_SP,
+};
+
+unsigned coverage_start_registers(const struct decode_header *h)
+{
+ unsigned regs = 0;
+ int i;
+ for (i = 0; i < 20; i += 4) {
+ int r = (h->type_regs.bits >> (DECODE_TYPE_BITS + i)) & 0xf;
+ regs |= coverage_register_lookup[r] << i;
+ }
+ return regs;
+}
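+
+/*
+ * For example, an entry whose lowest register-type nibble is
+ * REG_TYPE_NOSPPC starts out with COVERAGE_ANY_REG, COVERAGE_SP and
+ * COVERAGE_PC set in bits <3:0> of the returned value;
+ * coverage_add_registers() clears each flag as test cases are seen
+ * using an ordinary register, r13 or r15 in that instruction field.
+ */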
+
+static int coverage_start_fn(const struct decode_header *h, void *args)
+{
+ struct coverage_table *coverage = (struct coverage_table *)args;
+ enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+ struct coverage_entry *entry = coverage->base + coverage->num_entries;
+
+ if (coverage->num_entries == MAX_COVERAGE_ENTRIES - 1) {
+ pr_err("FAIL: Out of space for test coverage data\n");
+ return -ENOMEM;
+ }
+
+ ++coverage->num_entries;
+
+ entry->header = h;
+ entry->regs = coverage_start_registers(h);
+ entry->nesting = coverage->nesting;
+ entry->matched = false;
+
+ if (type == DECODE_TYPE_TABLE) {
+ struct decode_table *d = (struct decode_table *)h;
+ int ret;
+ ++coverage->nesting;
+ ret = table_iter(d->table.table, coverage_start_fn, coverage);
+ --coverage->nesting;
+ return ret;
+ }
+
+ return 0;
+}
+
+static int coverage_start(const union decode_item *table)
+{
+ coverage.base = kmalloc_array(MAX_COVERAGE_ENTRIES,
+ sizeof(struct coverage_entry),
+ GFP_KERNEL);
+ if (!coverage.base)
+ return -ENOMEM;
+ coverage.num_entries = 0;
+ coverage.nesting = 0;
+ return table_iter(table, coverage_start_fn, &coverage);
+}
+
+static void
+coverage_add_registers(struct coverage_entry *entry, kprobe_opcode_t insn)
+{
+ int regs = entry->header->type_regs.bits >> DECODE_TYPE_BITS;
+ int i;
+ for (i = 0; i < 20; i += 4) {
+ enum decode_reg_type reg_type = (regs >> i) & 0xf;
+ int reg = (insn >> i) & 0xf;
+ int flag;
+
+ if (!reg_type)
+ continue;
+
+ if (reg == 13)
+ flag = COVERAGE_SP;
+ else if (reg == 15)
+ flag = COVERAGE_PC;
+ else
+ flag = COVERAGE_ANY_REG;
+ entry->regs &= ~(flag << i);
+
+ switch (reg_type) {
+
+ case REG_TYPE_NONE:
+ case REG_TYPE_ANY:
+ case REG_TYPE_SAMEAS16:
+ break;
+
+ case REG_TYPE_SP:
+ if (reg != 13)
+ return;
+ break;
+
+ case REG_TYPE_PC:
+ if (reg != 15)
+ return;
+ break;
+
+ case REG_TYPE_NOSP:
+ if (reg == 13)
+ return;
+ break;
+
+ case REG_TYPE_NOSPPC:
+ case REG_TYPE_NOSPPCX:
+ if (reg == 13 || reg == 15)
+ return;
+ break;
+
+ case REG_TYPE_NOPCWB:
+ if (!is_writeback(insn))
+ break;
+ if (reg == 15) {
+ entry->regs &= ~(COVERAGE_PCWB << i);
+ return;
+ }
+ break;
+
+ case REG_TYPE_NOPC:
+ case REG_TYPE_NOPCX:
+ if (reg == 15)
+ return;
+ break;
+ }
+ }
+}
+
+static void coverage_add(kprobe_opcode_t insn)
+{
+ struct coverage_entry *entry = coverage.base;
+ struct coverage_entry *end = coverage.base + coverage.num_entries;
+ bool matched = false;
+ unsigned nesting = 0;
+
+ for (; entry < end; ++entry) {
+ const struct decode_header *h = entry->header;
+ enum decode_type type = h->type_regs.bits & DECODE_TYPE_MASK;
+
+ if (entry->nesting > nesting)
+ continue; /* Skip sub-table we didn't match */
+
+ if (entry->nesting < nesting)
+ break; /* End of sub-table we were scanning */
+
+ if (!matched) {
+ if ((insn & h->mask.bits) != h->value.bits)
+ continue;
+ entry->matched = true;
+ }
+
+ switch (type) {
+
+ case DECODE_TYPE_TABLE:
+ ++nesting;
+ break;
+
+ case DECODE_TYPE_CUSTOM:
+ case DECODE_TYPE_SIMULATE:
+ case DECODE_TYPE_EMULATE:
+ coverage_add_registers(entry, insn);
+ return;
+
+ case DECODE_TYPE_OR:
+ matched = true;
+ break;
+
+ case DECODE_TYPE_REJECT:
+ default:
+ return;
+ }
+ }
+}
+
+static void coverage_end(void)
+{
+ struct coverage_entry *entry = coverage.base;
+ struct coverage_entry *end = coverage.base + coverage.num_entries;
+
+ for (; entry < end; ++entry) {
+ u32 mask = entry->header->mask.bits;
+ u32 value = entry->header->value.bits;
+
+ if (entry->regs) {
+ pr_err("FAIL: Register test coverage missing for %08x %08x (%05x)\n",
+ mask, value, entry->regs);
+ coverage_fail = true;
+ }
+ if (!entry->matched) {
+ pr_err("FAIL: Test coverage entry missing for %08x %08x\n",
+ mask, value);
+ coverage_fail = true;
+ }
+ }
+
+ kfree(coverage.base);
+}
+
+
+/*
+ * Framework for instruction set test cases
+ */
+
+void __naked __kprobes_test_case_start(void)
+{
+ __asm__ __volatile__ (
+ "mov r2, sp \n\t"
+ "bic r3, r2, #7 \n\t"
+ "mov sp, r3 \n\t"
+ "stmdb sp!, {r2-r11} \n\t"
+ "sub sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+ "bic r0, lr, #1 @ r0 = inline data \n\t"
+ "mov r1, sp \n\t"
+ "bl kprobes_test_case_start \n\t"
+ RET(r0)" \n\t"
+ );
+}
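+
+/*
+ * Note: on entry lr holds the return address of the "bl
+ * __kprobes_test_case_start" emitted by TESTCASE_START, which is the
+ * start of the test case's inline data; "bic r0, lr, #1" merely clears
+ * the Thumb bit so that kprobes_test_case_start() receives an even
+ * pointer. Its return value is the address of the test case code, which
+ * the final RET(r0) jumps to.
+ */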
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+void __naked __kprobes_test_case_end_32(void)
+{
+ __asm__ __volatile__ (
+ "mov r4, lr \n\t"
+ "bl kprobes_test_case_end \n\t"
+ "cmp r0, #0 \n\t"
+ "movne pc, r0 \n\t"
+ "mov r0, r4 \n\t"
+ "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
+ "mov pc, r0 \n\t"
+ );
+}
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+void __naked __kprobes_test_case_end_16(void)
+{
+ __asm__ __volatile__ (
+ "mov r4, lr \n\t"
+ "bl kprobes_test_case_end \n\t"
+ "cmp r0, #0 \n\t"
+ "bxne r0 \n\t"
+ "mov r0, r4 \n\t"
+ "add sp, sp, #"__stringify(TEST_MEMORY_SIZE)"\n\t"
+ "ldmia sp!, {r2-r11} \n\t"
+ "mov sp, r2 \n\t"
+ "bx r0 \n\t"
+ );
+}
+
+void __naked __kprobes_test_case_end_32(void)
+{
+ __asm__ __volatile__ (
+ ".arm \n\t"
+ "orr lr, lr, #1 @ will return to Thumb code \n\t"
+ "ldr pc, 1f \n\t"
+ "1: \n\t"
+ ".word __kprobes_test_case_end_16 \n\t"
+ );
+}
+
+#endif
+
+
+int kprobe_test_flags;
+int kprobe_test_cc_position;
+
+static int test_try_count;
+static int test_pass_count;
+static int test_fail_count;
+
+static struct pt_regs initial_regs;
+static struct pt_regs expected_regs;
+static struct pt_regs result_regs;
+
+static u32 expected_memory[TEST_MEMORY_SIZE/sizeof(u32)];
+
+static const char *current_title;
+static struct test_arg *current_args;
+static u32 *current_stack;
+static uintptr_t current_branch_target;
+
+static uintptr_t current_code_start;
+static kprobe_opcode_t current_instruction;
+
+
+#define TEST_CASE_PASSED -1
+#define TEST_CASE_FAILED -2
+
+static int test_case_run_count;
+static bool test_case_is_thumb;
+static int test_instance;
+
+static unsigned long test_check_cc(int cc, unsigned long cpsr)
+{
+ int ret = arm_check_condition(cc << 28, cpsr);
+
+ return (ret != ARM_OPCODE_CONDTEST_FAIL);
+}
+
+static int is_last_scenario;
+static int probe_should_run; /* 0 = no, 1 = yes, -1 = unknown */
+static int memory_needs_checking;
+
+static unsigned long test_context_cpsr(int scenario)
+{
+ unsigned long cpsr;
+
+ probe_should_run = 1;
+
+ /* Default case is that we cycle through 16 combinations of flags */
+ cpsr = (scenario & 0xf) << 28; /* N,Z,C,V flags */
+ cpsr |= (scenario & 0xf) << 16; /* GE flags */
+ cpsr |= (scenario & 0x1) << 27; /* Toggle Q flag */
+
+ if (!test_case_is_thumb) {
+ /* Testing ARM code */
+ int cc = current_instruction >> 28;
+
+ probe_should_run = test_check_cc(cc, cpsr) != 0;
+ if (scenario == 15)
+ is_last_scenario = true;
+
+ } else if (kprobe_test_flags & TEST_FLAG_NO_ITBLOCK) {
+ /* Testing Thumb code without setting ITSTATE */
+ if (kprobe_test_cc_position) {
+ int cc = (current_instruction >> kprobe_test_cc_position) & 0xf;
+ probe_should_run = test_check_cc(cc, cpsr) != 0;
+ }
+
+ if (scenario == 15)
+ is_last_scenario = true;
+
+ } else if (kprobe_test_flags & TEST_FLAG_FULL_ITBLOCK) {
+ /* Testing Thumb code with all combinations of ITSTATE */
+ unsigned x = (scenario >> 4);
+ unsigned cond_base = x % 7; /* ITSTATE<7:5> */
+ unsigned mask = x / 7 + 2; /* ITSTATE<4:0>, bits reversed */
+
+ if (mask > 0x1f) {
+ /* Finish by testing state from instruction 'itt al' */
+ cond_base = 7;
+ mask = 0x4;
+ if ((scenario & 0xf) == 0xf)
+ is_last_scenario = true;
+ }
+
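+		/*
+		 * In the CPSR, ITSTATE<7:2> occupy bits 15:10 and
+		 * ITSTATE<1:0> occupy bits 26:25, hence the scattered
+		 * shifts below.
+		 */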
+ cpsr |= cond_base << 13; /* ITSTATE<7:5> */
+ cpsr |= (mask & 0x1) << 12; /* ITSTATE<4> */
+ cpsr |= (mask & 0x2) << 10; /* ITSTATE<3> */
+ cpsr |= (mask & 0x4) << 8; /* ITSTATE<2> */
+ cpsr |= (mask & 0x8) << 23; /* ITSTATE<1> */
+ cpsr |= (mask & 0x10) << 21; /* ITSTATE<0> */
+
+ probe_should_run = test_check_cc((cpsr >> 12) & 0xf, cpsr) != 0;
+
+ } else {
+ /* Testing Thumb code with several combinations of ITSTATE */
+ switch (scenario) {
+ case 16: /* Clear NZCV flags and 'it eq' state (false as Z=0) */
+ cpsr = 0x00000800;
+ probe_should_run = 0;
+ break;
+ case 17: /* Set NZCV flags and 'it vc' state (false as V=1) */
+ cpsr = 0xf0007800;
+ probe_should_run = 0;
+ break;
+ case 18: /* Clear NZCV flags and 'it ls' state (true as C=0) */
+ cpsr = 0x00009800;
+ break;
+ case 19: /* Set NZCV flags and 'it cs' state (true as C=1) */
+ cpsr = 0xf0002800;
+ is_last_scenario = true;
+ break;
+ }
+ }
+
+ return cpsr;
+}
+
+static void setup_test_context(struct pt_regs *regs)
+{
+	int scenario = test_case_run_count >> 1;
+ unsigned long val;
+ struct test_arg *args;
+ int i;
+
+ is_last_scenario = false;
+ memory_needs_checking = false;
+
+ /* Initialise test memory on stack */
+ val = (scenario & 1) ? VALM : ~VALM;
+ for (i = 0; i < TEST_MEMORY_SIZE / sizeof(current_stack[0]); ++i)
+ current_stack[i] = val + (i << 8);
+ /* Put target of branch on stack for tests which load PC from memory */
+ if (current_branch_target)
+ current_stack[15] = current_branch_target;
+ /* Put a value for SP on stack for tests which load SP from memory */
+ current_stack[13] = (u32)current_stack + 120;
+
+ /* Initialise register values to their default state */
+ val = (scenario & 2) ? VALR : ~VALR;
+ for (i = 0; i < 13; ++i)
+ regs->uregs[i] = val ^ (i << 8);
+ regs->ARM_lr = val ^ (14 << 8);
+ regs->ARM_cpsr &= ~(APSR_MASK | PSR_IT_MASK);
+ regs->ARM_cpsr |= test_context_cpsr(scenario);
+
+ /* Perform testcase specific register setup */
+ args = current_args;
+ for (; args[0].type != ARG_TYPE_END; ++args)
+ switch (args[0].type) {
+ case ARG_TYPE_REG: {
+ struct test_arg_regptr *arg =
+ (struct test_arg_regptr *)args;
+ regs->uregs[arg->reg] = arg->val;
+ break;
+ }
+ case ARG_TYPE_PTR: {
+ struct test_arg_regptr *arg =
+ (struct test_arg_regptr *)args;
+ regs->uregs[arg->reg] =
+ (unsigned long)current_stack + arg->val;
+ memory_needs_checking = true;
+ /*
+ * Test memory at an address below SP is in danger of
+ * being altered by an interrupt occurring and pushing
+ * data onto the stack. Disable interrupts to stop this.
+ */
+ if (arg->reg == 13)
+ regs->ARM_cpsr |= PSR_I_BIT;
+ break;
+ }
+ case ARG_TYPE_MEM: {
+ struct test_arg_mem *arg = (struct test_arg_mem *)args;
+ current_stack[arg->index] = arg->val;
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+struct test_probe {
+ struct kprobe kprobe;
+ bool registered;
+ int hit;
+};
+
+static void unregister_test_probe(struct test_probe *probe)
+{
+ if (probe->registered) {
+ unregister_kprobe(&probe->kprobe);
+ probe->kprobe.flags = 0; /* Clear disable flag to allow reuse */
+ }
+ probe->registered = false;
+}
+
+static int register_test_probe(struct test_probe *probe)
+{
+ int ret;
+
+ if (probe->registered)
+ BUG();
+
+ ret = register_kprobe(&probe->kprobe);
+ if (ret >= 0) {
+ probe->registered = true;
+ probe->hit = -1;
+ }
+ return ret;
+}
+
+static int __kprobes
+test_before_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ container_of(p, struct test_probe, kprobe)->hit = test_instance;
+ return 0;
+}
+
+static void __kprobes
+test_before_post_handler(struct kprobe *p, struct pt_regs *regs,
+ unsigned long flags)
+{
+ setup_test_context(regs);
+ initial_regs = *regs;
+ initial_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
+}
+
+static int __kprobes
+test_case_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ container_of(p, struct test_probe, kprobe)->hit = test_instance;
+ return 0;
+}
+
+static int __kprobes
+test_after_pre_handler(struct kprobe *p, struct pt_regs *regs)
+{
+ struct test_arg *args;
+
+ if (container_of(p, struct test_probe, kprobe)->hit == test_instance)
+ return 0; /* Already run for this test instance */
+
+ result_regs = *regs;
+
+ /* Mask out results which are indeterminate */
+ result_regs.ARM_cpsr &= ~PSR_IGNORE_BITS;
+ for (args = current_args; args[0].type != ARG_TYPE_END; ++args)
+ if (args[0].type == ARG_TYPE_REG_MASKED) {
+ struct test_arg_regptr *arg =
+ (struct test_arg_regptr *)args;
+ result_regs.uregs[arg->reg] &= arg->val;
+ }
+
+ /* Undo any changes done to SP by the test case */
+ regs->ARM_sp = (unsigned long)current_stack;
+ /* Enable interrupts in case setup_test_context disabled them */
+ regs->ARM_cpsr &= ~PSR_I_BIT;
+
+ container_of(p, struct test_probe, kprobe)->hit = test_instance;
+ return 0;
+}
+
+static struct test_probe test_before_probe = {
+ .kprobe.pre_handler = test_before_pre_handler,
+ .kprobe.post_handler = test_before_post_handler,
+};
+
+static struct test_probe test_case_probe = {
+ .kprobe.pre_handler = test_case_pre_handler,
+};
+
+static struct test_probe test_after_probe = {
+ .kprobe.pre_handler = test_after_pre_handler,
+};
+
+static struct test_probe test_after2_probe = {
+ .kprobe.pre_handler = test_after_pre_handler,
+};
+
+static void test_case_cleanup(void)
+{
+ unregister_test_probe(&test_before_probe);
+ unregister_test_probe(&test_case_probe);
+ unregister_test_probe(&test_after_probe);
+ unregister_test_probe(&test_after2_probe);
+}
+
+static void print_registers(struct pt_regs *regs)
+{
+ pr_err("r0 %08lx | r1 %08lx | r2 %08lx | r3 %08lx\n",
+ regs->ARM_r0, regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+ pr_err("r4 %08lx | r5 %08lx | r6 %08lx | r7 %08lx\n",
+ regs->ARM_r4, regs->ARM_r5, regs->ARM_r6, regs->ARM_r7);
+ pr_err("r8 %08lx | r9 %08lx | r10 %08lx | r11 %08lx\n",
+ regs->ARM_r8, regs->ARM_r9, regs->ARM_r10, regs->ARM_fp);
+ pr_err("r12 %08lx | sp %08lx | lr %08lx | pc %08lx\n",
+ regs->ARM_ip, regs->ARM_sp, regs->ARM_lr, regs->ARM_pc);
+ pr_err("cpsr %08lx\n", regs->ARM_cpsr);
+}
+
+static void print_memory(u32 *mem, size_t size)
+{
+ int i;
+ for (i = 0; i < size / sizeof(u32); i += 4)
+ pr_err("%08x %08x %08x %08x\n", mem[i], mem[i+1],
+ mem[i+2], mem[i+3]);
+}
+
+static size_t expected_memory_size(u32 *sp)
+{
+ size_t size = sizeof(expected_memory);
+ int offset = (uintptr_t)sp - (uintptr_t)current_stack;
+ if (offset > 0)
+ size -= offset;
+ return size;
+}
+
+static void test_case_failed(const char *message)
+{
+ test_case_cleanup();
+
+ pr_err("FAIL: %s\n", message);
+ pr_err("FAIL: Test %s\n", current_title);
+ pr_err("FAIL: Scenario %d\n", test_case_run_count >> 1);
+}
+
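+/*
+ * Return the address of the instruction following the one at 'pc',
+ * allowing for 16-bit versus 32-bit Thumb encodings. (Bit 0 of 'pc' set
+ * indicates Thumb code.)
+ */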
+static unsigned long next_instruction(unsigned long pc)
+{
+#ifdef CONFIG_THUMB2_KERNEL
+ if ((pc & 1) &&
+ !is_wide_instruction(__mem_to_opcode_thumb16(*(u16 *)(pc - 1))))
+ return pc + 2;
+ else
+#endif
+ return pc + 4;
+}
+
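+/*
+ * Entered from __kprobes_test_case_start with 'title' pointing at the
+ * inline data emitted by TESTCASE_START: a pointer to the title string,
+ * followed by the test_arg list and then the test code itself. This
+ * registers the before/after probes around the instruction under test and
+ * returns the address at which execution should continue.
+ */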
+static uintptr_t __used kprobes_test_case_start(const char **title, void *stack)
+{
+ struct test_arg *args;
+ struct test_arg_end *end_arg;
+ unsigned long test_code;
+
+ current_title = *title++;
+ args = (struct test_arg *)title;
+ current_args = args;
+ current_stack = stack;
+
+ ++test_try_count;
+
+ while (args->type != ARG_TYPE_END)
+ ++args;
+ end_arg = (struct test_arg_end *)args;
+
+ test_code = (unsigned long)(args + 1); /* Code starts after args */
+
+ test_case_is_thumb = end_arg->flags & ARG_FLAG_THUMB;
+ if (test_case_is_thumb)
+ test_code |= 1;
+
+ current_code_start = test_code;
+
+ current_branch_target = 0;
+ if (end_arg->branch_offset != end_arg->end_offset)
+ current_branch_target = test_code + end_arg->branch_offset;
+
+ test_code += end_arg->code_offset;
+ test_before_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+ test_code = next_instruction(test_code);
+ test_case_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+ if (test_case_is_thumb) {
+ u16 *p = (u16 *)(test_code & ~1);
+ current_instruction = __mem_to_opcode_thumb16(p[0]);
+ if (is_wide_instruction(current_instruction)) {
+ u16 instr2 = __mem_to_opcode_thumb16(p[1]);
+ current_instruction = __opcode_thumb32_compose(current_instruction, instr2);
+ }
+ } else {
+ current_instruction = __mem_to_opcode_arm(*(u32 *)test_code);
+ }
+
+ if (current_title[0] == '.')
+ verbose("%s\n", current_title);
+ else
+ verbose("%s\t@ %0*x\n", current_title,
+ test_case_is_thumb ? 4 : 8,
+ current_instruction);
+
+ test_code = next_instruction(test_code);
+ test_after_probe.kprobe.addr = (kprobe_opcode_t *)test_code;
+
+ if (kprobe_test_flags & TEST_FLAG_NARROW_INSTR) {
+ if (!test_case_is_thumb ||
+ is_wide_instruction(current_instruction)) {
+ test_case_failed("expected 16-bit instruction");
+ goto fail;
+ }
+ } else {
+ if (test_case_is_thumb &&
+ !is_wide_instruction(current_instruction)) {
+ test_case_failed("expected 32-bit instruction");
+ goto fail;
+ }
+ }
+
+ coverage_add(current_instruction);
+
+ if (end_arg->flags & ARG_FLAG_UNSUPPORTED) {
+ if (register_test_probe(&test_case_probe) < 0)
+ goto pass;
+ test_case_failed("registered probe for unsupported instruction");
+ goto fail;
+ }
+
+ if (end_arg->flags & ARG_FLAG_SUPPORTED) {
+ if (register_test_probe(&test_case_probe) >= 0)
+ goto pass;
+ test_case_failed("couldn't register probe for supported instruction");
+ goto fail;
+ }
+
+ if (register_test_probe(&test_before_probe) < 0) {
+ test_case_failed("register test_before_probe failed");
+ goto fail;
+ }
+ if (register_test_probe(&test_after_probe) < 0) {
+ test_case_failed("register test_after_probe failed");
+ goto fail;
+ }
+ if (current_branch_target) {
+ test_after2_probe.kprobe.addr =
+ (kprobe_opcode_t *)current_branch_target;
+ if (register_test_probe(&test_after2_probe) < 0) {
+ test_case_failed("register test_after2_probe failed");
+ goto fail;
+ }
+ }
+
+ /* Start first run of test case */
+ test_case_run_count = 0;
+ ++test_instance;
+ return current_code_start;
+pass:
+ test_case_run_count = TEST_CASE_PASSED;
+ return (uintptr_t)test_after_probe.kprobe.addr;
+fail:
+ test_case_run_count = TEST_CASE_FAILED;
+ return (uintptr_t)test_after_probe.kprobe.addr;
+}
+
+static bool check_test_results(void)
+{
+ size_t mem_size = 0;
+	u32 *mem = NULL;
+
+ if (memcmp(&expected_regs, &result_regs, sizeof(expected_regs))) {
+ test_case_failed("registers differ");
+ goto fail;
+ }
+
+ if (memory_needs_checking) {
+ mem = (u32 *)result_regs.ARM_sp;
+ mem_size = expected_memory_size(mem);
+ if (memcmp(expected_memory, mem, mem_size)) {
+ test_case_failed("test memory differs");
+ goto fail;
+ }
+ }
+
+ return true;
+
+fail:
+ pr_err("initial_regs:\n");
+ print_registers(&initial_regs);
+ pr_err("expected_regs:\n");
+ print_registers(&expected_regs);
+ pr_err("result_regs:\n");
+ print_registers(&result_regs);
+
+ if (mem) {
+ pr_err("expected_memory:\n");
+ print_memory(expected_memory, mem_size);
+ pr_err("result_memory:\n");
+ print_memory(mem, mem_size);
+ }
+
+ return false;
+}
+
+static uintptr_t __used kprobes_test_case_end(void)
+{
+ if (test_case_run_count < 0) {
+ if (test_case_run_count == TEST_CASE_PASSED)
+ /* kprobes_test_case_start did all the needed testing */
+ goto pass;
+ else
+ /* kprobes_test_case_start failed */
+ goto fail;
+ }
+
+ if (test_before_probe.hit != test_instance) {
+ test_case_failed("test_before_handler not run");
+ goto fail;
+ }
+
+ if (test_after_probe.hit != test_instance &&
+ test_after2_probe.hit != test_instance) {
+ test_case_failed("test_after_handler not run");
+ goto fail;
+ }
+
+ /*
+	 * Even-numbered test runs execute without a probe on the test case
+	 * instruction so that we can gather reference results. The
+	 * subsequent odd-numbered run has the probe inserted.
+ */
+ if ((test_case_run_count & 1) == 0) {
+ /* Save results from run without probe */
+ u32 *mem = (u32 *)result_regs.ARM_sp;
+ expected_regs = result_regs;
+ memcpy(expected_memory, mem, expected_memory_size(mem));
+
+ /* Insert probe onto test case instruction */
+ if (register_test_probe(&test_case_probe) < 0) {
+ test_case_failed("register test_case_probe failed");
+ goto fail;
+ }
+ } else {
+ /* Check probe ran as expected */
+ if (probe_should_run == 1) {
+ if (test_case_probe.hit != test_instance) {
+ test_case_failed("test_case_handler not run");
+ goto fail;
+ }
+ } else if (probe_should_run == 0) {
+ if (test_case_probe.hit == test_instance) {
+ test_case_failed("test_case_handler ran");
+ goto fail;
+ }
+ }
+
+ /* Remove probe for any subsequent reference run */
+ unregister_test_probe(&test_case_probe);
+
+ if (!check_test_results())
+ goto fail;
+
+ if (is_last_scenario)
+ goto pass;
+ }
+
+ /* Do next test run */
+ ++test_case_run_count;
+ ++test_instance;
+ return current_code_start;
+fail:
+ ++test_fail_count;
+ goto end;
+pass:
+ ++test_pass_count;
+end:
+ test_case_cleanup();
+ return 0;
+}
+
+
+/*
+ * Top level test functions
+ */
+
+static int run_test_cases(void (*tests)(void), const union decode_item *table)
+{
+ int ret;
+
+ pr_info(" Check decoding tables\n");
+ ret = table_test(table);
+ if (ret)
+ return ret;
+
+ pr_info(" Run test cases\n");
+ ret = coverage_start(table);
+ if (ret)
+ return ret;
+
+ tests();
+
+ coverage_end();
+ return 0;
+}
+
+
+static int __init run_all_tests(void)
+{
+ int ret = 0;
+
+ pr_info("Beginning kprobe tests...\n");
+
+#ifndef CONFIG_THUMB2_KERNEL
+
+ pr_info("Probe ARM code\n");
+ ret = run_api_tests(arm_func);
+ if (ret)
+ goto out;
+
+ pr_info("ARM instruction simulation\n");
+ ret = run_test_cases(kprobe_arm_test_cases, probes_decode_arm_table);
+ if (ret)
+ goto out;
+
+#else /* CONFIG_THUMB2_KERNEL */
+
+ pr_info("Probe 16-bit Thumb code\n");
+ ret = run_api_tests(thumb16_func);
+ if (ret)
+ goto out;
+
+ pr_info("Probe 32-bit Thumb code, even halfword\n");
+ ret = run_api_tests(thumb32even_func);
+ if (ret)
+ goto out;
+
+ pr_info("Probe 32-bit Thumb code, odd halfword\n");
+ ret = run_api_tests(thumb32odd_func);
+ if (ret)
+ goto out;
+
+ pr_info("16-bit Thumb instruction simulation\n");
+ ret = run_test_cases(kprobe_thumb16_test_cases,
+ probes_decode_thumb16_table);
+ if (ret)
+ goto out;
+
+ pr_info("32-bit Thumb instruction simulation\n");
+ ret = run_test_cases(kprobe_thumb32_test_cases,
+ probes_decode_thumb32_table);
+ if (ret)
+ goto out;
+#endif
+
+ pr_info("Total instruction simulation tests=%d, pass=%d fail=%d\n",
+ test_try_count, test_pass_count, test_fail_count);
+ if (test_fail_count) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+#if BENCHMARKING
+ pr_info("Benchmarks\n");
+ ret = run_benchmarks();
+ if (ret)
+ goto out;
+#endif
+
+#if __LINUX_ARM_ARCH__ >= 7
+ /* We are able to run all test cases so coverage should be complete */
+ if (coverage_fail) {
+ pr_err("FAIL: Test coverage checks failed\n");
+ ret = -EINVAL;
+ goto out;
+ }
+#endif
+
+out:
+ if (ret == 0)
+ ret = tests_failed;
+ if (ret == 0)
+ pr_info("Finished kprobe tests OK\n");
+ else
+ pr_err("kprobe tests failed\n");
+
+ return ret;
+}
+
+
+/*
+ * Module setup
+ */
+
+#ifdef MODULE
+
+static void __exit kprobe_test_exit(void)
+{
+}
+
+module_init(run_all_tests)
+module_exit(kprobe_test_exit)
+MODULE_LICENSE("GPL");
+
+#else /* !MODULE */
+
+late_initcall(run_all_tests);
+
+#endif
diff --git a/arch/arm/probes/kprobes/test-core.h b/arch/arm/probes/kprobes/test-core.h
new file mode 100644
index 000000000..94285203e
--- /dev/null
+++ b/arch/arm/probes/kprobes/test-core.h
@@ -0,0 +1,458 @@
+/*
+ * arch/arm/probes/kprobes/test-core.h
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define VERBOSE 0 /* Set to '1' for more logging of test cases */
+
+#ifdef CONFIG_THUMB2_KERNEL
+#define NORMAL_ISA "16"
+#else
+#define NORMAL_ISA "32"
+#endif
+
+
+/* Flags used in kprobe_test_flags */
+#define TEST_FLAG_NO_ITBLOCK (1<<0)
+#define TEST_FLAG_FULL_ITBLOCK (1<<1)
+#define TEST_FLAG_NARROW_INSTR (1<<2)
+
+extern int kprobe_test_flags;
+extern int kprobe_test_cc_position;
+
+
+#define TEST_MEMORY_SIZE 256
+
+
+/*
+ * Test case structures.
+ *
+ * The arguments given to test cases can be one of four types.
+ *
+ * ARG_TYPE_REG
+ *	Load a register with the given value.
+ *
+ * ARG_TYPE_PTR
+ *	Load a register with a pointer into the stack buffer (SP + given value).
+ *
+ * ARG_TYPE_MEM
+ *	Store the given value into word 'index' of the stack buffer, i.e. at
+ *	byte offset [SP + index*4].
+ *
+ * ARG_TYPE_REG_MASKED
+ *	AND the given mask with the register's value in the captured test
+ *	results before checking, so that indeterminate bits are ignored.
+ *
+ */
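+
+/*
+ * For example (an illustrative sketch rather than one of the cases in the
+ * test files), the hypothetical argument list
+ *
+ *	TEST_ARG_REG(1, VAL1)
+ *	TEST_ARG_PTR(2, 24)
+ *	TEST_ARG_MEM(6, VAL2)
+ *	TEST_ARG_END("")
+ *
+ * sets r1 to VAL1, points r2 at byte offset 24 into the stack buffer, and
+ * stores VAL2 in word 6 of that buffer before the instruction under test
+ * executes.
+ */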
+
+#define ARG_TYPE_END 0
+#define ARG_TYPE_REG 1
+#define ARG_TYPE_PTR 2
+#define ARG_TYPE_MEM 3
+#define ARG_TYPE_REG_MASKED 4
+
+#define ARG_FLAG_UNSUPPORTED 0x01
+#define ARG_FLAG_SUPPORTED 0x02
+#define ARG_FLAG_THUMB 0x10 /* Must be 16 so TEST_ISA can be used */
+#define ARG_FLAG_ARM 0x20 /* Must be 32 so TEST_ISA can be used */
+
+struct test_arg {
+ u8 type; /* ARG_TYPE_x */
+ u8 _padding[7];
+};
+
+struct test_arg_regptr {
+ u8 type; /* ARG_TYPE_REG or ARG_TYPE_PTR or ARG_TYPE_REG_MASKED */
+ u8 reg;
+ u8 _padding[2];
+ u32 val;
+};
+
+struct test_arg_mem {
+ u8 type; /* ARG_TYPE_MEM */
+ u8 index;
+ u8 _padding[2];
+ u32 val;
+};
+
+struct test_arg_end {
+ u8 type; /* ARG_TYPE_END */
+ u8 flags; /* ARG_FLAG_x */
+ u16 code_offset;
+ u16 branch_offset;
+ u16 end_offset;
+};
+
+
+/*
+ * Building blocks for test cases.
+ *
+ * Each test case is wrapped between TESTCASE_START and TESTCASE_END.
+ *
+ * To specify arguments for a test case, the TEST_ARG_{REG,PTR,MEM} macros are
+ * used, followed by a terminating TEST_ARG_END.
+ *
+ * After this, the instruction to be tested is defined with TEST_INSTRUCTION,
+ * or, for branches, with TEST_BRANCH_B and TEST_BRANCH_F (branch
+ * backwards/forwards).
+ *
+ * Some specific test cases may make use of other custom constructs.
+ */
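+
+/*
+ * As an illustrative sketch (this is what the TEST_R() macro further below
+ * expands to), a complete test case assembled from these building blocks
+ * might look like:
+ *
+ *	TESTCASE_START("add	r0, r1")
+ *	TEST_ARG_REG(1, VAL1)
+ *	TEST_ARG_END("")
+ *	TEST_INSTRUCTION("add	r0, r1")
+ *	TESTCASE_END
+ *
+ * This runs "add r0, r1" with r1 preloaded with VAL1, comparing the
+ * results of a probed and an unprobed run.
+ */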
+
+#if VERBOSE
+#define verbose(fmt, ...) pr_info(fmt, ##__VA_ARGS__)
+#else
+#define verbose(fmt, ...)
+#endif
+
+#define TEST_GROUP(title) \
+ verbose("\n"); \
+ verbose(title"\n"); \
+ verbose("---------------------------------------------------------\n");
+
+#define TESTCASE_START(title) \
+ __asm__ __volatile__ ( \
+ "bl __kprobes_test_case_start \n\t" \
+ ".pushsection .rodata \n\t" \
+ "10: \n\t" \
+ /* don't use .asciz here as 'title' may be */ \
+ /* multiple strings to be concatenated. */ \
+ ".ascii "#title" \n\t" \
+ ".byte 0 \n\t" \
+ ".popsection \n\t" \
+ ".word 10b \n\t"
+
+#define TEST_ARG_REG(reg, val) \
+ ".byte "__stringify(ARG_TYPE_REG)" \n\t" \
+ ".byte "#reg" \n\t" \
+ ".short 0 \n\t" \
+ ".word "#val" \n\t"
+
+#define TEST_ARG_PTR(reg, val) \
+ ".byte "__stringify(ARG_TYPE_PTR)" \n\t" \
+ ".byte "#reg" \n\t" \
+ ".short 0 \n\t" \
+ ".word "#val" \n\t"
+
+#define TEST_ARG_MEM(index, val) \
+ ".byte "__stringify(ARG_TYPE_MEM)" \n\t" \
+ ".byte "#index" \n\t" \
+ ".short 0 \n\t" \
+ ".word "#val" \n\t"
+
+#define TEST_ARG_REG_MASKED(reg, val) \
+ ".byte "__stringify(ARG_TYPE_REG_MASKED)" \n\t" \
+ ".byte "#reg" \n\t" \
+ ".short 0 \n\t" \
+ ".word "#val" \n\t"
+
+#define TEST_ARG_END(flags) \
+ ".byte "__stringify(ARG_TYPE_END)" \n\t" \
+ ".byte "TEST_ISA flags" \n\t" \
+ ".short 50f-0f \n\t" \
+ ".short 2f-0f \n\t" \
+ ".short 99f-0f \n\t" \
+ ".code "TEST_ISA" \n\t" \
+ "0: \n\t"
+
+#define TEST_INSTRUCTION(instruction) \
+ "50: nop \n\t" \
+ "1: "instruction" \n\t" \
+ " nop \n\t"
+
+#define TEST_BRANCH_F(instruction) \
+ TEST_INSTRUCTION(instruction) \
+ " b 99f \n\t" \
+ "2: nop \n\t"
+
+#define TEST_BRANCH_B(instruction) \
+ " b 50f \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t" \
+ " b 99f \n\t" \
+ TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex) \
+ TEST_INSTRUCTION(instruction) \
+ " b 99f \n\t" \
+ codex" \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t"
+
+#define TEST_BRANCH_BX(instruction, codex) \
+ " b 50f \n\t" \
+ " b 99f \n\t" \
+ "2: nop \n\t" \
+ " b 99f \n\t" \
+ codex" \n\t" \
+ TEST_INSTRUCTION(instruction)
+
+#define TESTCASE_END \
+ "2: \n\t" \
+ "99: \n\t" \
+ " bl __kprobes_test_case_end_"TEST_ISA" \n\t" \
+ ".code "NORMAL_ISA" \n\t" \
+ : : \
+ : "r0", "r1", "r2", "r3", "ip", "lr", "memory", "cc" \
+ );
+
+
+/*
+ * Macros to define test cases.
+ *
+ * Those of the form TEST_{R,P,M}* can be used to define test cases
+ * which take combinations of the three basic types of arguments. E.g.
+ *
+ * TEST_R One register argument
+ * TEST_RR Two register arguments
+ * TEST_RPR A register, a pointer, then a register argument
+ *
+ * For testing instructions which may branch, there are macros TEST_BF_*
+ * and TEST_BB_* for branching forwards and backwards.
+ *
+ * TEST_SUPPORTED and TEST_UNSUPPORTED don't cause the code to be executed;
+ * they just verify that a kprobe is or is not allowed on the given instruction.
+ */
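+
+/*
+ * For instance, kprobe_thumb16_test_cases() in test-thumb.c uses the
+ * two-register form as:
+ *
+ *	TEST_RR( "adds	r2, r",0,VAL1,", r",7,VAL2,"")
+ *
+ * which tests "adds r2, r0, r7" with r0 = VAL1 and r7 = VAL2.
+ */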
+
+#define TEST(code) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code) \
+ TESTCASE_END
+
+#define TEST_UNSUPPORTED(code) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("|"__stringify(ARG_FLAG_UNSUPPORTED)) \
+ TEST_INSTRUCTION(code) \
+ TESTCASE_END
+
+#define TEST_SUPPORTED(code) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("|"__stringify(ARG_FLAG_SUPPORTED)) \
+ TEST_INSTRUCTION(code) \
+ TESTCASE_END
+
+#define TEST_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg code2) \
+ TESTCASE_END
+
+#define TEST_RR(code1, reg1, val1, code2, reg2, val2, code3) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_RRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_REG(reg3, val3) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TESTCASE_END
+
+#define TEST_RRRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4, reg4, val4) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_REG(reg3, val3) \
+ TEST_ARG_REG(reg4, val4) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4 #reg4) \
+ TESTCASE_END
+
+#define TEST_P(code1, reg1, val1, code2) \
+ TESTCASE_START(code1 #reg1 code2) \
+ TEST_ARG_PTR(reg1, val1) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2) \
+ TESTCASE_END
+
+#define TEST_PR(code1, reg1, val1, code2, reg2, val2, code3) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
+ TEST_ARG_PTR(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_RP(code1, reg1, val1, code2, reg2, val2, code3) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_PTR(reg2, val2) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_PRR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TEST_ARG_PTR(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_REG(reg3, val3) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TESTCASE_END
+
+#define TEST_RPR(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_PTR(reg2, val2) \
+ TEST_ARG_REG(reg3, val3) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TESTCASE_END
+
+#define TEST_RRP(code1, reg1, val1, code2, reg2, val2, code3, reg3, val3, code4)\
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_PTR(reg3, val3) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg1 code2 #reg2 code3 #reg3 code4) \
+ TESTCASE_END
+
+#define TEST_BF_P(code1, reg1, val1, code2) \
+ TESTCASE_START(code1 #reg1 code2) \
+ TEST_ARG_PTR(reg1, val1) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg1 code2) \
+ TESTCASE_END
+
+#define TEST_BF(code) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code) \
+ TESTCASE_END
+
+#define TEST_BB(code) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_B(code) \
+ TESTCASE_END
+
+#define TEST_BF_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg code2) \
+ TESTCASE_END
+
+#define TEST_BB_R(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_B(code1 #reg code2) \
+ TESTCASE_END
+
+#define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3) \
+ TESTCASE_END
+
+#define TEST_BF_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BB_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_BX(code, codex) \
+ TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_FX(code1 #reg code2, codex) \
+ TESTCASE_END
+
+#define TEST_X(code, codex) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code) \
+ " b 99f \n\t" \
+ " "codex" \n\t" \
+ TESTCASE_END
+
+#define TEST_RX(code1, reg, val, code2, codex) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG(reg, val) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 __stringify(reg) code2) \
+ " b 99f \n\t" \
+ " "codex" \n\t" \
+ TESTCASE_END
+
+#define TEST_RRX(code1, reg1, val1, code2, reg2, val2, code3, codex) \
+ TESTCASE_START(code1 #reg1 code2 #reg2 code3) \
+ TEST_ARG_REG(reg1, val1) \
+ TEST_ARG_REG(reg2, val2) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 __stringify(reg1) code2 __stringify(reg2) code3) \
+ " b 99f \n\t" \
+ " "codex" \n\t" \
+ TESTCASE_END
+
+#define TEST_RMASKED(code1, reg, mask, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_REG_MASKED(reg, mask) \
+ TEST_ARG_END("") \
+ TEST_INSTRUCTION(code1 #reg code2) \
+ TESTCASE_END
+
+/*
+ * We ignore the state of the imprecise abort disable flag (CPSR.A) because this
+ * can change randomly as the kernel doesn't take care to preserve or initialise
+ * this across context switches. Also, with Security Extensions, the flag may
+ * not be under control of the kernel; for this reason we ignore the state of
+ * the FIQ disable flag CPSR.F as well.
+ */
+#define PSR_IGNORE_BITS (PSR_A_BIT | PSR_F_BIT)
+
+
+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so that the compiler can better estimate the length of
+ * inline asm code and spill the literal pool early enough to avoid
+ * generating PC-relative loads with out-of-range offsets.
+ */
+#define TWICE(x) x x
+#define SPACE_0x8 TWICE(".space 4\n\t")
+#define SPACE_0x10 TWICE(SPACE_0x8)
+#define SPACE_0x20 TWICE(SPACE_0x10)
+#define SPACE_0x40 TWICE(SPACE_0x20)
+#define SPACE_0x80 TWICE(SPACE_0x40)
+#define SPACE_0x100 TWICE(SPACE_0x80)
+#define SPACE_0x200 TWICE(SPACE_0x100)
+#define SPACE_0x400 TWICE(SPACE_0x200)
+#define SPACE_0x800 TWICE(SPACE_0x400)
+#define SPACE_0x1000 TWICE(SPACE_0x800)
+
+
+/* Various values used in test cases... */
+#define N(val) (val ^ 0xffffffff)
+#define VAL1 0x12345678
+#define VAL2 N(VAL1)
+#define VAL3 0xa5f801
+#define VAL4 N(VAL3)
+#define VALM 0x456789ab
+#define VALR 0xdeaddead
+#define HH1 0x0123fecb
+#define HH2 0xa9874567
+
+
+#ifdef CONFIG_THUMB2_KERNEL
+void kprobe_thumb16_test_cases(void);
+void kprobe_thumb32_test_cases(void);
+#else
+void kprobe_arm_test_cases(void);
+#endif
diff --git a/arch/arm/probes/kprobes/test-thumb.c b/arch/arm/probes/kprobes/test-thumb.c
new file mode 100644
index 000000000..4254391f3
--- /dev/null
+++ b/arch/arm/probes/kprobes/test-thumb.c
@@ -0,0 +1,1200 @@
+/*
+ * arch/arm/probes/kprobes/test-thumb.c
+ *
+ * Copyright (C) 2011 Jon Medhurst <tixy@yxit.co.uk>.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <asm/opcodes.h>
+#include <asm/probes.h>
+
+#include "test-core.h"
+
+
+#define TEST_ISA "16"
+
+#define DONT_TEST_IN_ITBLOCK(tests) \
+ kprobe_test_flags |= TEST_FLAG_NO_ITBLOCK; \
+ tests \
+ kprobe_test_flags &= ~TEST_FLAG_NO_ITBLOCK;
+
+#define CONDITION_INSTRUCTIONS(cc_pos, tests) \
+ kprobe_test_cc_position = cc_pos; \
+ DONT_TEST_IN_ITBLOCK(tests) \
+ kprobe_test_cc_position = 0;
+
+#define TEST_ITBLOCK(code) \
+ kprobe_test_flags |= TEST_FLAG_FULL_ITBLOCK; \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ "50: nop \n\t" \
+ "1: "code" \n\t" \
+ " mov r1, #0x11 \n\t" \
+ " mov r2, #0x22 \n\t" \
+ " mov r3, #0x33 \n\t" \
+ "2: nop \n\t" \
+ TESTCASE_END \
+ kprobe_test_flags &= ~TEST_FLAG_FULL_ITBLOCK;
+
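+/*
+ * Test a Thumb instruction which interworks by loading pc with an ARM-state
+ * address: stack word 15 is preloaded with the address of the ARM code at
+ * label 3, which then returns to Thumb code at label 2.
+ */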
+#define TEST_THUMB_TO_ARM_INTERWORK_P(code1, reg, val, code2) \
+ TESTCASE_START(code1 #reg code2) \
+ TEST_ARG_PTR(reg, val) \
+ TEST_ARG_REG(14, 99f+1) \
+ TEST_ARG_MEM(15, 3f) \
+ TEST_ARG_END("") \
+ " nop \n\t" /* To align 1f */ \
+ "50: nop \n\t" \
+ "1: "code1 #reg code2" \n\t" \
+ " bx lr \n\t" \
+ ".arm \n\t" \
+ "3: adr lr, 2f+1 \n\t" \
+ " bx lr \n\t" \
+ ".thumb \n\t" \
+ "2: nop \n\t" \
+ TESTCASE_END
+
+
+void kprobe_thumb16_test_cases(void)
+{
+ kprobe_test_flags = TEST_FLAG_NARROW_INSTR;
+
+ TEST_GROUP("Shift (immediate), add, subtract, move, and compare")
+
+ TEST_R( "lsls r7, r",0,VAL1,", #5")
+ TEST_R( "lsls r0, r",7,VAL2,", #11")
+ TEST_R( "lsrs r7, r",0,VAL1,", #5")
+ TEST_R( "lsrs r0, r",7,VAL2,", #11")
+ TEST_R( "asrs r7, r",0,VAL1,", #5")
+ TEST_R( "asrs r0, r",7,VAL2,", #11")
+ TEST_RR( "adds r2, r",0,VAL1,", r",7,VAL2,"")
+ TEST_RR( "adds r5, r",7,VAL2,", r",0,VAL2,"")
+ TEST_RR( "subs r2, r",0,VAL1,", r",7,VAL2,"")
+ TEST_RR( "subs r5, r",7,VAL2,", r",0,VAL2,"")
+ TEST_R( "adds r7, r",0,VAL1,", #5")
+ TEST_R( "adds r0, r",7,VAL2,", #2")
+ TEST_R( "subs r7, r",0,VAL1,", #5")
+ TEST_R( "subs r0, r",7,VAL2,", #2")
+ TEST( "movs.n r0, #0x5f")
+ TEST( "movs.n r7, #0xa0")
+ TEST_R( "cmp.n r",0,0x5e, ", #0x5f")
+ TEST_R( "cmp.n r",5,0x15f,", #0x5f")
+ TEST_R( "cmp.n r",7,0xa0, ", #0xa0")
+ TEST_R( "adds.n r",0,VAL1,", #0x5f")
+ TEST_R( "adds.n r",7,VAL2,", #0xa0")
+ TEST_R( "subs.n r",0,VAL1,", #0x5f")
+ TEST_R( "subs.n r",7,VAL2,", #0xa0")
+
+ TEST_GROUP("16-bit Thumb data-processing instructions")
+
+#define DATA_PROCESSING16(op,val) \
+ TEST_RR( op" r",0,VAL1,", r",7,val,"") \
+ TEST_RR( op" r",7,VAL2,", r",0,val,"")
+
+ DATA_PROCESSING16("ands",0xf00f00ff)
+ DATA_PROCESSING16("eors",0xf00f00ff)
+ DATA_PROCESSING16("lsls",11)
+ DATA_PROCESSING16("lsrs",11)
+ DATA_PROCESSING16("asrs",11)
+ DATA_PROCESSING16("adcs",VAL2)
+ DATA_PROCESSING16("sbcs",VAL2)
+ DATA_PROCESSING16("rors",11)
+ DATA_PROCESSING16("tst",0xf00f00ff)
+ TEST_R("rsbs r",0,VAL1,", #0")
+ TEST_R("rsbs r",7,VAL2,", #0")
+ DATA_PROCESSING16("cmp",0xf00f00ff)
+ DATA_PROCESSING16("cmn",0xf00f00ff)
+ DATA_PROCESSING16("orrs",0xf00f00ff)
+ DATA_PROCESSING16("muls",VAL2)
+ DATA_PROCESSING16("bics",0xf00f00ff)
+ DATA_PROCESSING16("mvns",VAL2)
+
+ TEST_GROUP("Special data instructions and branch and exchange")
+
+ TEST_RR( "add r",0, VAL1,", r",7,VAL2,"")
+ TEST_RR( "add r",3, VAL2,", r",8,VAL3,"")
+ TEST_RR( "add r",8, VAL3,", r",0,VAL1,"")
+ TEST_R( "add sp" ", r",8,-8, "")
+ TEST_R( "add r",14,VAL1,", pc")
+ TEST_BF_R("add pc" ", r",0,2f-1f-8,"")
+ TEST_UNSUPPORTED(__inst_thumb16(0x44ff) " @ add pc, pc")
+
+ TEST_RR( "cmp r",3,VAL1,", r",8,VAL2,"")
+ TEST_RR( "cmp r",8,VAL2,", r",0,VAL1,"")
+ TEST_R( "cmp sp" ", r",8,-8, "")
+
+ TEST_R( "mov r0, r",7,VAL2,"")
+ TEST_R( "mov r3, r",8,VAL3,"")
+ TEST_R( "mov r8, r",0,VAL1,"")
+ TEST_P( "mov sp, r",8,-8, "")
+ TEST( "mov lr, pc")
+ TEST_BF_R("mov pc, r",0,2f, "")
+
+ TEST_BF_R("bx r",0, 2f+1,"")
+ TEST_BF_R("bx r",14,2f+1,"")
+ TESTCASE_START("bx pc")
+ TEST_ARG_REG(14, 99f+1)
+ TEST_ARG_END("")
+	" nop		\n\t"	/* To align the bx pc */
+ "50: nop \n\t"
+ "1: bx pc \n\t"
+ " bx lr \n\t"
+ ".arm \n\t"
+ " adr lr, 2f+1 \n\t"
+ " bx lr \n\t"
+ ".thumb \n\t"
+ "2: nop \n\t"
+ TESTCASE_END
+
+ TEST_BF_R("blx r",0, 2f+1,"")
+ TEST_BB_R("blx r",14,2f+1,"")
+ TEST_UNSUPPORTED(__inst_thumb16(0x47f8) " @ blx pc")
+
+ TEST_GROUP("Load from Literal Pool")
+
+ TEST_X( "ldr r0, 3f",
+ ".align \n\t"
+ "3: .word "__stringify(VAL1))
+ TEST_X( "ldr r7, 3f",
+ ".space 128 \n\t"
+ ".align \n\t"
+ "3: .word "__stringify(VAL2))
+
+ TEST_GROUP("16-bit Thumb Load/store instructions")
+
+ TEST_RPR("str r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
+ TEST_RPR("str r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
+ TEST_RPR("strh r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
+ TEST_RPR("strh r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
+ TEST_RPR("strb r",0, VAL1,", [r",1, 24,", r",2, 48,"]")
+ TEST_RPR("strb r",7, VAL2,", [r",6, 24,", r",5, 48,"]")
+ TEST_PR( "ldrsb r0, [r",1, 24,", r",2, 48,"]")
+ TEST_PR( "ldrsb r7, [r",6, 24,", r",5, 50,"]")
+ TEST_PR( "ldr r0, [r",1, 24,", r",2, 48,"]")
+ TEST_PR( "ldr r7, [r",6, 24,", r",5, 48,"]")
+ TEST_PR( "ldrh r0, [r",1, 24,", r",2, 48,"]")
+ TEST_PR( "ldrh r7, [r",6, 24,", r",5, 50,"]")
+ TEST_PR( "ldrb r0, [r",1, 24,", r",2, 48,"]")
+ TEST_PR( "ldrb r7, [r",6, 24,", r",5, 50,"]")
+ TEST_PR( "ldrsh r0, [r",1, 24,", r",2, 48,"]")
+ TEST_PR( "ldrsh r7, [r",6, 24,", r",5, 50,"]")
+
+ TEST_RP("str r",0, VAL1,", [r",1, 24,", #120]")
+ TEST_RP("str r",7, VAL2,", [r",6, 24,", #120]")
+ TEST_P( "ldr r0, [r",1, 24,", #120]")
+ TEST_P( "ldr r7, [r",6, 24,", #120]")
+ TEST_RP("strb r",0, VAL1,", [r",1, 24,", #30]")
+ TEST_RP("strb r",7, VAL2,", [r",6, 24,", #30]")
+ TEST_P( "ldrb r0, [r",1, 24,", #30]")
+ TEST_P( "ldrb r7, [r",6, 24,", #30]")
+ TEST_RP("strh r",0, VAL1,", [r",1, 24,", #60]")
+ TEST_RP("strh r",7, VAL2,", [r",6, 24,", #60]")
+ TEST_P( "ldrh r0, [r",1, 24,", #60]")
+ TEST_P( "ldrh r7, [r",6, 24,", #60]")
+
+ TEST_R( "str r",0, VAL1,", [sp, #0]")
+ TEST_R( "str r",7, VAL2,", [sp, #160]")
+ TEST( "ldr r0, [sp, #0]")
+ TEST( "ldr r7, [sp, #160]")
+
+ TEST_RP("str r",0, VAL1,", [r",0, 24,"]")
+ TEST_P( "ldr r0, [r",0, 24,"]")
+
+ TEST_GROUP("Generate PC-/SP-relative address")
+
+ TEST("add r0, pc, #4")
+ TEST("add r7, pc, #1020")
+ TEST("add r0, sp, #4")
+ TEST("add r7, sp, #1020")
+
+ TEST_GROUP("Miscellaneous 16-bit instructions")
+
+ TEST_UNSUPPORTED( "cpsie i")
+ TEST_UNSUPPORTED( "cpsid i")
+ TEST_UNSUPPORTED( "setend le")
+ TEST_UNSUPPORTED( "setend be")
+
+ TEST("add sp, #"__stringify(TEST_MEMORY_SIZE)) /* Assumes TEST_MEMORY_SIZE < 0x400 */
+ TEST("sub sp, #0x7f*4")
+
+DONT_TEST_IN_ITBLOCK(
+ TEST_BF_R( "cbnz r",0,0, ", 2f")
+ TEST_BF_R( "cbz r",2,-1,", 2f")
+ TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
+ TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
+)
+ TEST_R("sxth r0, r",7, HH1,"")
+ TEST_R("sxth r7, r",0, HH2,"")
+ TEST_R("sxtb r0, r",7, HH1,"")
+ TEST_R("sxtb r7, r",0, HH2,"")
+ TEST_R("uxth r0, r",7, HH1,"")
+ TEST_R("uxth r7, r",0, HH2,"")
+ TEST_R("uxtb r0, r",7, HH1,"")
+ TEST_R("uxtb r7, r",0, HH2,"")
+ TEST_R("rev r0, r",7, VAL1,"")
+ TEST_R("rev r7, r",0, VAL2,"")
+ TEST_R("rev16 r0, r",7, VAL1,"")
+ TEST_R("rev16 r7, r",0, VAL2,"")
+ TEST_UNSUPPORTED(__inst_thumb16(0xba80) "")
+ TEST_UNSUPPORTED(__inst_thumb16(0xbabf) "")
+ TEST_R("revsh r0, r",7, VAL1,"")
+ TEST_R("revsh r7, r",0, VAL2,"")
+
+#define TEST_POPPC(code, offset) \
+ TESTCASE_START(code) \
+ TEST_ARG_PTR(13, offset) \
+ TEST_ARG_END("") \
+ TEST_BRANCH_F(code) \
+ TESTCASE_END
+
+ TEST("push {r0}")
+ TEST("push {r7}")
+ TEST("push {r14}")
+ TEST("push {r0-r7,r14}")
+ TEST("push {r0,r2,r4,r6,r14}")
+ TEST("push {r1,r3,r5,r7}")
+ TEST("pop {r0}")
+ TEST("pop {r7}")
+ TEST("pop {r0,r2,r4,r6}")
+ TEST_POPPC("pop {pc}",15*4)
+ TEST_POPPC("pop {r0-r7,pc}",7*4)
+ TEST_POPPC("pop {r1,r3,r5,r7,pc}",11*4)
+ TEST_THUMB_TO_ARM_INTERWORK_P("pop {pc} @ ",13,15*4,"")
+ TEST_THUMB_TO_ARM_INTERWORK_P("pop {r0-r7,pc} @ ",13,7*4,"")
+
+ TEST_UNSUPPORTED("bkpt.n 0")
+ TEST_UNSUPPORTED("bkpt.n 255")
+
+ TEST_SUPPORTED("yield")
+ TEST("sev")
+ TEST("nop")
+ TEST("wfi")
+ TEST_SUPPORTED("wfe")
+ TEST_UNSUPPORTED(__inst_thumb16(0xbf50) "") /* Unassigned hints */
+ TEST_UNSUPPORTED(__inst_thumb16(0xbff0) "") /* Unassigned hints */
+
+#define TEST_IT(code, code2) \
+ TESTCASE_START(code) \
+ TEST_ARG_END("") \
+ "50: nop \n\t" \
+ "1: "code" \n\t" \
+ " "code2" \n\t" \
+ "2: nop \n\t" \
+ TESTCASE_END
+
+DONT_TEST_IN_ITBLOCK(
+ TEST_IT("it eq","moveq r0,#0")
+ TEST_IT("it vc","movvc r0,#0")
+ TEST_IT("it le","movle r0,#0")
+ TEST_IT("ite eq","moveq r0,#0\n\t movne r1,#1")
+ TEST_IT("itet vc","movvc r0,#0\n\t movvs r1,#1\n\t movvc r2,#2")
+ TEST_IT("itete le","movle r0,#0\n\t movgt r1,#1\n\t movle r2,#2\n\t movgt r3,#3")
+ TEST_IT("itttt le","movle r0,#0\n\t movle r1,#1\n\t movle r2,#2\n\t movle r3,#3")
+ TEST_IT("iteee le","movle r0,#0\n\t movgt r1,#1\n\t movgt r2,#2\n\t movgt r3,#3")
+)
+
+ TEST_GROUP("Load and store multiple")
+
+ TEST_P("ldmia r",4, 16*4,"!, {r0,r7}")
+ TEST_P("ldmia r",7, 16*4,"!, {r0-r6}")
+ TEST_P("stmia r",4, 16*4,"!, {r0,r7}")
+ TEST_P("stmia r",0, 16*4,"!, {r0-r7}")
+
+ TEST_GROUP("Conditional branch and Supervisor Call instructions")
+
+CONDITION_INSTRUCTIONS(8,
+ TEST_BF("beq 2f")
+ TEST_BB("bne 2b")
+ TEST_BF("bgt 2f")
+ TEST_BB("blt 2b")
+)
+ TEST_UNSUPPORTED(__inst_thumb16(0xde00) "")
+ TEST_UNSUPPORTED(__inst_thumb16(0xdeff) "")
+ TEST_UNSUPPORTED("svc #0x00")
+ TEST_UNSUPPORTED("svc #0xff")
+
+ TEST_GROUP("Unconditional branch")
+
+ TEST_BF( "b 2f")
+ TEST_BB( "b 2b")
+ TEST_BF_X("b 2f", SPACE_0x400)
+ TEST_BB_X("b 2b", SPACE_0x400)
+
+ TEST_GROUP("Testing instructions in IT blocks")
+
+ TEST_ITBLOCK("subs.n r0, r0")
+
+ verbose("\n");
+}
+
+
+void kprobe_thumb32_test_cases(void)
+{
+ kprobe_test_flags = 0;
+
+ TEST_GROUP("Load/store multiple")
+
+ TEST_UNSUPPORTED("rfedb sp")
+ TEST_UNSUPPORTED("rfeia sp")
+ TEST_UNSUPPORTED("rfedb sp!")
+ TEST_UNSUPPORTED("rfeia sp!")
+
+ TEST_P( "stmia r",0, 16*4,", {r0,r8}")
+ TEST_P( "stmia r",4, 16*4,", {r0-r12,r14}")
+ TEST_P( "stmia r",7, 16*4,"!, {r8-r12,r14}")
+ TEST_P( "stmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+
+ TEST_P( "ldmia r",0, 16*4,", {r0,r8}")
+ TEST_P( "ldmia r",4, 0, ", {r0-r12,r14}")
+ TEST_BF_P("ldmia r",5, 8*4, "!, {r6-r12,r15}")
+ TEST_P( "ldmia r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmia r",14,14*4,"!, {r4,pc}")
+
+ TEST_P( "stmdb r",0, 16*4,", {r0,r8}")
+ TEST_P( "stmdb r",4, 16*4,", {r0-r12,r14}")
+ TEST_P( "stmdb r",5, 16*4,"!, {r8-r12,r14}")
+ TEST_P( "stmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+
+ TEST_P( "ldmdb r",0, 16*4,", {r0,r8}")
+ TEST_P( "ldmdb r",4, 16*4,", {r0-r12,r14}")
+ TEST_BF_P("ldmdb r",5, 16*4,"!, {r6-r12,r15}")
+ TEST_P( "ldmdb r",12,16*4,"!, {r1,r3,r5,r7,r8-r11,r14}")
+ TEST_BF_P("ldmdb r",14,16*4,"!, {r4,pc}")
+
+ TEST_P( "stmdb r",13,16*4,"!, {r3-r12,lr}")
+ TEST_P( "stmdb r",13,16*4,"!, {r3-r12}")
+ TEST_P( "stmdb r",2, 16*4,", {r3-r12,lr}")
+ TEST_P( "stmdb r",13,16*4,"!, {r2-r12,lr}")
+ TEST_P( "stmdb r",0, 16*4,", {r0-r12}")
+ TEST_P( "stmdb r",0, 16*4,", {r0-r12,lr}")
+
+ TEST_BF_P("ldmia r",13,5*4, "!, {r3-r12,pc}")
+ TEST_P( "ldmia r",13,5*4, "!, {r3-r12}")
+ TEST_BF_P("ldmia r",2, 5*4, "!, {r3-r12,pc}")
+ TEST_BF_P("ldmia r",13,4*4, "!, {r2-r12,pc}")
+ TEST_P( "ldmia r",0, 16*4,", {r0-r12}")
+ TEST_P( "ldmia r",0, 16*4,", {r0-r12,lr}")
+
+ TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",0,14*4,", {r12,pc}")
+ TEST_THUMB_TO_ARM_INTERWORK_P("ldmia r",13,2*4,", {r0-r12,pc}")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xe88f0101) " @ stmia pc, {r0,r8}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe92f5f00) " @ stmdb pc!, {r8-r12,r14}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8bdc000) " @ ldmia r13!, {r14,pc}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe93ec000) " @ ldmdb r14!, {r14,pc}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8a73f00) " @ stmia r7!, {r8-r12,sp}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8a79f00) " @ stmia r7!, {r8-r12,pc}")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe93e2010) " @ ldmdb r14!, {r4,sp}")
+
+ TEST_GROUP("Load/store double or exclusive, table branch")
+
+ TEST_P( "ldrd r0, r1, [r",1, 24,", #-16]")
+ TEST( "ldrd r12, r14, [sp, #16]")
+ TEST_P( "ldrd r1, r0, [r",7, 24,", #-16]!")
+ TEST( "ldrd r14, r12, [sp, #16]!")
+ TEST_P( "ldrd r1, r0, [r",7, 24,"], #16")
+ TEST( "ldrd r7, r8, [sp], #-16")
+
+ TEST_X( "ldrd r12, r14, 3f",
+ ".align 3 \n\t"
+ "3: .word "__stringify(VAL1)" \n\t"
+ " .word "__stringify(VAL2))
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9ffec04) " @ ldrd r14, r12, [pc, #16]!")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8ffec04) " @ ldrd r14, r12, [pc], #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9d4d800) " @ ldrd sp, r8, [r4]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9d4f800) " @ ldrd pc, r8, [r4]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9d47d00) " @ ldrd r7, sp, [r4]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9d47f00) " @ ldrd r7, pc, [r4]")
+
+ TEST_RRP("strd r",0, VAL1,", r",1, VAL2,", [r",1, 24,", #-16]")
+ TEST_RR( "strd r",12,VAL2,", r",14,VAL1,", [sp, #16]")
+ TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,", #-16]!")
+ TEST_RR( "strd r",14,VAL2,", r",12,VAL1,", [sp, #16]!")
+ TEST_RRP("strd r",1, VAL1,", r",0, VAL2,", [r",7, 24,"], #16")
+ TEST_RR( "strd r",7, VAL2,", r",8, VAL1,", [sp], #-16")
+ TEST_RRP("strd r",6, VAL1,", r",7, VAL2,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!")
+ TEST_UNSUPPORTED("strd r6, r7, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_RRP("strd r",4, VAL1,", r",5, VAL2,", [r",14, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe9efec04) " @ strd r14, r12, [pc, #16]!")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8efec04) " @ strd r14, r12, [pc], #16")
+
+ TEST_RX("tbb [pc, r",0, (9f-(1f+4)),"]",
+ "9: \n\t"
+ ".byte (2f-1b-4)>>1 \n\t"
+ ".byte (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_RX("tbb [pc, r",4, (9f-(1f+4)+1),"]",
+ "9: \n\t"
+ ".byte (2f-1b-4)>>1 \n\t"
+ ".byte (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_RRX("tbb [r",1,9f,", r",2,0,"]",
+ "9: \n\t"
+ ".byte (2f-1b-4)>>1 \n\t"
+ ".byte (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_RX("tbh [pc, r",7, (9f-(1f+4))>>1,", lsl #1]",
+ "9: \n\t"
+ ".short (2f-1b-4)>>1 \n\t"
+ ".short (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_RX("tbh [pc, r",12, ((9f-(1f+4))>>1)+1,", lsl #1]",
+ "9: \n\t"
+ ".short (2f-1b-4)>>1 \n\t"
+ ".short (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_RRX("tbh [r",1,9f, ", r",14,1,", lsl #1]",
+ "9: \n\t"
+ ".short (2f-1b-4)>>1 \n\t"
+ ".short (3f-1b-4)>>1 \n\t"
+ "3: mvn r0, r0 \n\t"
+ "2: nop \n\t")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01f) " @ tbh [r1, pc]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8d1f01d) " @ tbh [r1, sp]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xe8ddf012) " @ tbh [sp, r2]")
+
+ TEST_UNSUPPORTED("strexb r0, r1, [r2]")
+ TEST_UNSUPPORTED("strexh r0, r1, [r2]")
+ TEST_UNSUPPORTED("strexd r0, r1, r2, [r2]")
+ TEST_UNSUPPORTED("ldrexb r0, [r1]")
+ TEST_UNSUPPORTED("ldrexh r0, [r1]")
+ TEST_UNSUPPORTED("ldrexd r0, r1, [r1]")
+
+ TEST_GROUP("Data-processing (shifted register) and (modified immediate)")
+
+#define _DATA_PROCESSING32_DNM(op,s,val) \
+ TEST_RR(op s".w r0, r",1, VAL1,", r",2, val, "") \
+ TEST_RR(op s" r1, r",1, VAL1,", r",2, val, ", lsl #3") \
+ TEST_RR(op s" r2, r",3, VAL1,", r",2, val, ", lsr #4") \
+ TEST_RR(op s" r3, r",3, VAL1,", r",2, val, ", asr #5") \
+ TEST_RR(op s" r4, r",5, VAL1,", r",2, N(val),", asr #6") \
+ TEST_RR(op s" r5, r",5, VAL1,", r",2, val, ", ror #7") \
+ TEST_RR(op s" r8, r",9, VAL1,", r",10,val, ", rrx") \
+ TEST_R( op s" r0, r",11,VAL1,", #0x00010001") \
+ TEST_R( op s" r11, r",0, VAL1,", #0xf5000000") \
+ TEST_R( op s" r7, r",8, VAL2,", #0x000af000")
+
+#define DATA_PROCESSING32_DNM(op,val) \
+ _DATA_PROCESSING32_DNM(op,"",val) \
+ _DATA_PROCESSING32_DNM(op,"s",val)
+
+#define DATA_PROCESSING32_NM(op,val) \
+ TEST_RR(op".w r",1, VAL1,", r",2, val, "") \
+ TEST_RR(op" r",1, VAL1,", r",2, val, ", lsl #3") \
+ TEST_RR(op" r",3, VAL1,", r",2, val, ", lsr #4") \
+ TEST_RR(op" r",3, VAL1,", r",2, val, ", asr #5") \
+ TEST_RR(op" r",5, VAL1,", r",2, N(val),", asr #6") \
+ TEST_RR(op" r",5, VAL1,", r",2, val, ", ror #7") \
+ TEST_RR(op" r",9, VAL1,", r",10,val, ", rrx") \
+ TEST_R( op" r",11,VAL1,", #0x00010001") \
+ TEST_R( op" r",0, VAL1,", #0xf5000000") \
+ TEST_R( op" r",8, VAL2,", #0x000af000")
+
+#define _DATA_PROCESSING32_DM(op,s,val) \
+ TEST_R( op s".w r0, r",14, val, "") \
+ TEST_R( op s" r1, r",12, val, ", lsl #3") \
+ TEST_R( op s" r2, r",11, val, ", lsr #4") \
+ TEST_R( op s" r3, r",10, val, ", asr #5") \
+ TEST_R( op s" r4, r",9, N(val),", asr #6") \
+ TEST_R( op s" r5, r",8, val, ", ror #7") \
+ TEST_R( op s" r8, r",7,val, ", rrx") \
+ TEST( op s" r0, #0x00010001") \
+ TEST( op s" r11, #0xf5000000") \
+ TEST( op s" r7, #0x000af000") \
+ TEST( op s" r4, #0x00005a00")
+
+#define DATA_PROCESSING32_DM(op,val) \
+ _DATA_PROCESSING32_DM(op,"",val) \
+ _DATA_PROCESSING32_DM(op,"s",val)
+
+ DATA_PROCESSING32_DNM("and",0xf00f00ff)
+ DATA_PROCESSING32_NM("tst",0xf00f00ff)
+ DATA_PROCESSING32_DNM("bic",0xf00f00ff)
+ DATA_PROCESSING32_DNM("orr",0xf00f00ff)
+ DATA_PROCESSING32_DM("mov",VAL2)
+ DATA_PROCESSING32_DNM("orn",0xf00f00ff)
+ DATA_PROCESSING32_DM("mvn",VAL2)
+ DATA_PROCESSING32_DNM("eor",0xf00f00ff)
+ DATA_PROCESSING32_NM("teq",0xf00f00ff)
+ DATA_PROCESSING32_DNM("add",VAL2)
+ DATA_PROCESSING32_NM("cmn",VAL2)
+ DATA_PROCESSING32_DNM("adc",VAL2)
+ DATA_PROCESSING32_DNM("sbc",VAL2)
+ DATA_PROCESSING32_DNM("sub",VAL2)
+ DATA_PROCESSING32_NM("cmp",VAL2)
+ DATA_PROCESSING32_DNM("rsb",VAL2)
+
+ TEST_RR("pkhbt r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR("pkhbt r14,r",12, HH1,", r",10,HH2,", lsl #2")
+ TEST_RR("pkhtb r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR("pkhtb r14,r",12, HH1,", r",10,HH2,", asr #2")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xea170f0d) " @ tst.w r7, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea170f0f) " @ tst.w r7, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea1d0f07) " @ tst.w sp, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea1f0f07) " @ tst.w pc, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf01d1f08) " @ tst sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf01f1f08) " @ tst pc, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xea970f0d) " @ teq.w r7, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea970f0f) " @ teq.w r7, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea9d0f07) " @ teq.w sp, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea9f0f07) " @ teq.w pc, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf09d1f08) " @ tst sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf09f1f08) " @ tst pc, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0d) " @ cmn.w r7, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb170f0f) " @ cmn.w r7, pc")
+ TEST_P("cmn.w sp, r",7,0,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb1f0f07) " @ cmn.w pc, r7")
+ TEST( "cmn sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf11f1f08) " @ cmn pc, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0d) " @ cmp.w r7, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebb70f0f) " @ cmp.w r7, pc")
+ TEST_P("cmp.w sp, r",7,0,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebbf0f07) " @ cmp.w pc, r7")
+ TEST( "cmp sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1bf1f08) " @ cmp pc, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xea5f070d) " @ movs.w r7, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea5f070f) " @ movs.w r7, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea5f0d07) " @ movs.w sp, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea4f0f07) " @ mov.w pc, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf04f1d08) " @ mov sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf04f1f08) " @ mov pc, #0x00080008")
+
+ TEST_R("add.w r0, sp, r",1, 4,"")
+ TEST_R("adds r0, sp, r",1, 4,", asl #3")
+ TEST_R("add r0, sp, r",1, 4,", asl #4")
+ TEST_R("add r0, sp, r",1, 16,", ror #1")
+ TEST_R("add.w sp, sp, r",1, 4,"")
+ TEST_R("add sp, sp, r",1, 4,", asl #3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d1d01) " @ add sp, sp, r1, asl #4")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d71) " @ add sp, sp, r1, ror #1")
+ TEST( "add.w r0, sp, #24")
+ TEST( "add.w sp, sp, #24")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0f01) " @ add pc, sp, r1")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000f) " @ add r0, sp, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d000d) " @ add r0, sp, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0f) " @ add sp, sp, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0d0d0d) " @ add sp, sp, sp")
+
+ TEST_R("sub.w r0, sp, r",1, 4,"")
+ TEST_R("subs r0, sp, r",1, 4,", asl #3")
+ TEST_R("sub r0, sp, r",1, 4,", asl #4")
+ TEST_R("sub r0, sp, r",1, 16,", ror #1")
+ TEST_R("sub.w sp, sp, r",1, 4,"")
+ TEST_R("sub sp, sp, r",1, 4,", asl #3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebad1d01) " @ sub sp, sp, r1, asl #4")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebad0d71) " @ sub sp, sp, r1, ror #1")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebad0f01) " @ sub pc, sp, r1")
+ TEST( "sub.w r0, sp, #24")
+ TEST( "sub.w sp, sp, #24")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xea02010f) " @ and r1, r2, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea0f0103) " @ and r1, pc, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea020f03) " @ and pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea02010d) " @ and r1, r2, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea0d0103) " @ and r1, sp, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xea020d03) " @ and sp, r2, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf00d1108) " @ and r1, sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf00f1108) " @ and r1, pc, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf0021d08) " @ and sp, r8, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf0021f08) " @ and pc, r8, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb02010f) " @ add r1, r2, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb0f0103) " @ add r1, pc, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb020f03) " @ add pc, r2, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb02010d) " @ add r1, r2, sp")
+ TEST_SUPPORTED( __inst_thumb32(0xeb0d0103) " @ add r1, sp, r3")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb020d03) " @ add sp, r2, r3")
+ TEST_SUPPORTED( __inst_thumb32(0xf10d1108) " @ add r1, sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf10d1f08) " @ add pc, sp, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf10f1108) " @ add r1, pc, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1021d08) " @ add sp, r8, #0x00080008")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1021f08) " @ add pc, r8, #0x00080008")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xeaa00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeaf00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb200000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeb800000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xebe00000) "")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf0a00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf0c00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf0f00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1200000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1800000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf1e00000) "")
+
+ TEST_GROUP("Coprocessor instructions")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xec000000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xeff00000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfff00000) "")
+
+ TEST_GROUP("Data-processing (plain binary immediate)")
+
+ TEST_R("addw r0, r",1, VAL1,", #0x123")
+ TEST( "addw r14, sp, #0xf5a")
+ TEST( "addw sp, sp, #0x20")
+ TEST( "addw r7, pc, #0x888")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf20f1f20) " @ addw pc, pc, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf20d1f20) " @ addw pc, sp, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf20f1d20) " @ addw sp, pc, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2001d20) " @ addw sp, r0, #0x120")
+
+ TEST_R("subw r0, r",1, VAL1,", #0x123")
+ TEST( "subw r14, sp, #0xf5a")
+ TEST( "subw sp, sp, #0x20")
+ TEST( "subw r7, pc, #0x888")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2af1f20) " @ subw pc, pc, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2ad1f20) " @ subw pc, sp, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2af1d20) " @ subw sp, pc, #0x120")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2a01d20) " @ subw sp, r0, #0x120")
+
+ TEST("movw r0, #0")
+ TEST("movw r0, #0xffff")
+ TEST("movw lr, #0xffff")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2400d00) " @ movw sp, #0")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2400f00) " @ movw pc, #0")
+
+ TEST_R("movt r",0, VAL1,", #0")
+ TEST_R("movt r",0, VAL2,", #0xffff")
+ TEST_R("movt r",14,VAL1,", #0xffff")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2c00d00) " @ movt sp, #0")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf2c00f00) " @ movt pc, #0")
+
+ TEST_R( "ssat r0, #24, r",0, VAL1,"")
+ TEST_R( "ssat r14, #24, r",12, VAL2,"")
+ TEST_R( "ssat r0, #24, r",0, VAL1,", lsl #8")
+ TEST_R( "ssat r14, #24, r",12, VAL2,", asr #8")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf30c0d17) " @ ssat sp, #24, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf30c0f17) " @ ssat pc, #24, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf30d0c17) " @ ssat r12, #24, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf30f0c17) " @ ssat r12, #24, pc")
+
+ TEST_R( "usat r0, #24, r",0, VAL1,"")
+ TEST_R( "usat r14, #24, r",12, VAL2,"")
+ TEST_R( "usat r0, #24, r",0, VAL1,", lsl #8")
+ TEST_R( "usat r14, #24, r",12, VAL2,", asr #8")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf38c0d17) " @ usat sp, #24, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf38c0f17) " @ usat pc, #24, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf38d0c17) " @ usat r12, #24, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf38f0c17) " @ usat r12, #24, pc")
+
+ TEST_R( "ssat16 r0, #12, r",0, HH1,"")
+ TEST_R( "ssat16 r14, #12, r",12, HH2,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf32c0d0b) " @ ssat16 sp, #12, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf32c0f0b) " @ ssat16 pc, #12, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf32d0c0b) " @ ssat16 r12, #12, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf32f0c0b) " @ ssat16 r12, #12, pc")
+
+ TEST_R( "usat16 r0, #12, r",0, HH1,"")
+ TEST_R( "usat16 r14, #12, r",12, HH2,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0d0b) " @ usat16 sp, #12, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3ac0f0b) " @ usat16 pc, #12, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3ad0c0b) " @ usat16 r12, #12, sp")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3af0c0b) " @ usat16 r12, #12, pc")
+
+ TEST_R( "sbfx r0, r",0 , VAL1,", #0, #31")
+ TEST_R( "sbfx r14, r",12, VAL2,", #8, #16")
+ TEST_R( "sbfx r4, r",10, VAL1,", #16, #15")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf34c2d0f) " @ sbfx sp, r12, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf34c2f0f) " @ sbfx pc, r12, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf34d2c0f) " @ sbfx r12, sp, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf34f2c0f) " @ sbfx r12, pc, #8, #16")
+
+ TEST_R( "ubfx r0, r",0 , VAL1,", #0, #31")
+ TEST_R( "ubfx r14, r",12, VAL2,", #8, #16")
+ TEST_R( "ubfx r4, r",10, VAL1,", #16, #15")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2d0f) " @ ubfx sp, r12, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3cc2f0f) " @ ubfx pc, r12, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3cd2c0f) " @ ubfx r12, sp, #8, #16")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3cf2c0f) " @ ubfx r12, pc, #8, #16")
+
+ TEST_R( "bfc r",0, VAL1,", #4, #20")
+ TEST_R( "bfc r",14,VAL2,", #4, #20")
+ TEST_R( "bfc r",7, VAL1,", #0, #31")
+ TEST_R( "bfc r",8, VAL2,", #0, #31")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf36f0d1e) " @ bfc sp, #0, #31")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf36f0f1e) " @ bfc pc, #0, #31")
+
+ TEST_RR( "bfi r",0, VAL1,", r",0 , VAL2,", #0, #31")
+ TEST_RR( "bfi r",12,VAL1,", r",14 , VAL2,", #4, #20")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf36e1d17) " @ bfi sp, r14, #4, #20")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf36e1f17) " @ bfi pc, r14, #4, #20")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf36d1e17) " @ bfi r14, sp, #4, #20")
+
+ TEST_GROUP("Branches and miscellaneous control")
+
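+/*
+ * Conditional branches. The 22 passed to CONDITION_INSTRUCTIONS is the
+ * bit position of the condition-code field in these 32-bit encodings
+ * (see test-core.h).
+ */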
+CONDITION_INSTRUCTIONS(22,
+ TEST_BF("beq.w 2f")
+ TEST_BB("bne.w 2b")
+ TEST_BF("bgt.w 2f")
+ TEST_BB("blt.w 2b")
+ TEST_BF_X("bpl.w 2f", SPACE_0x1000)
+)
+
+ TEST_UNSUPPORTED("msr cpsr, r0")
+ TEST_UNSUPPORTED("msr cpsr_f, r1")
+ TEST_UNSUPPORTED("msr spsr, r2")
+
+ TEST_UNSUPPORTED("cpsie.w i")
+ TEST_UNSUPPORTED("cpsid.w i")
+ TEST_UNSUPPORTED("cps 0x13")
+
+ TEST_SUPPORTED("yield.w")
+ TEST("sev.w")
+ TEST("nop.w")
+ TEST("wfi.w")
+ TEST_SUPPORTED("wfe.w")
+ TEST_UNSUPPORTED("dbg.w #0")
+
+ TEST_UNSUPPORTED("clrex")
+ TEST_UNSUPPORTED("dsb")
+ TEST_UNSUPPORTED("dmb")
+ TEST_UNSUPPORTED("isb")
+
+ TEST_UNSUPPORTED("bxj r0")
+
+ TEST_UNSUPPORTED("subs pc, lr, #4")
+
+ TEST_RMASKED("mrs r",0,~PSR_IGNORE_BITS,", cpsr")
+ TEST_RMASKED("mrs r",14,~PSR_IGNORE_BITS,", cpsr")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8d00) " @ mrs sp, cpsr")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf3ef8f00) " @ mrs pc, cpsr")
+ TEST_UNSUPPORTED("mrs r0, spsr")
+ TEST_UNSUPPORTED("mrs lr, spsr")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf7f08000) " @ smc #0")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf7f0a000) " @ undefined")
+
+ TEST_BF( "b.w 2f")
+ TEST_BB( "b.w 2b")
+ TEST_BF_X("b.w 2f", SPACE_0x1000)
+
+ TEST_BF( "bl.w 2f")
+ TEST_BB( "bl.w 2b")
+ TEST_BB_X("bl.w 2b", SPACE_0x1000)
+
+ TEST_X( "blx __dummy_arm_subroutine",
+ ".arm \n\t"
+ ".align \n\t"
+ ".type __dummy_arm_subroutine, %%function \n\t"
+ "__dummy_arm_subroutine: \n\t"
+ "mov r0, pc \n\t"
+ "bx lr \n\t"
+ ".thumb \n\t"
+ )
+ TEST( "blx __dummy_arm_subroutine")
+
+ TEST_GROUP("Store single data item")
+
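+/*
+ * Exercise one store width across immediate-offset, post-indexed,
+ * pre-indexed and register-offset addressing, including the
+ * MAX_STACK_SIZE boundary cases and the forms the decoder must
+ * reject (sp as index register, unprivileged str<size>t).
+ */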
+#define SINGLE_STORE(size) \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,-1024,", #1024]") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, -1024,", #1080]") \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]") \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #120") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #128") \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, "], #-120") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, "], #-128") \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,24, ", #120]!") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, 24, ", #128]!") \
+ TEST_RP( "str"size" r",0, VAL1,", [r",11,256, ", #-120]!") \
+ TEST_RP( "str"size" r",14,VAL2,", [r",1, 256, ", #-128]!") \
+ TEST_RPR("str"size".w r",0, VAL1,", [r",1, 0,", r",2, 4,"]") \
+ TEST_RPR("str"size" r",14,VAL2,", [r",10,0,", r",11,4,", lsl #1]") \
+ TEST_UNSUPPORTED("str"size" r0, [r13, r1]") \
+ TEST_R( "str"size".w r",7, VAL1,", [sp, #24]") \
+ TEST_RP( "str"size".w r",0, VAL2,", [r",0,0, "]") \
+ TEST_RP( "str"size" r",6, VAL1,", [r",13, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"]!") \
+ TEST_UNSUPPORTED("str"size" r6, [r13, #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_RP( "str"size" r",4, VAL2,", [r",12, TEST_MEMORY_SIZE,", #-"__stringify(MAX_STACK_SIZE)"-8]!") \
+ TEST_UNSUPPORTED("str"size"t r0, [r1, #4]")
+
+ SINGLE_STORE("b")
+ SINGLE_STORE("h")
+ SINGLE_STORE("")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf801000d) " @ strb r0, [r1, r13]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf821000d) " @ strh r0, [r1, r13]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf841000d) " @ str r0, [r1, r13]")
+
+ TEST("str sp, [sp]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf8cfe000) " @ str r14, [pc]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf8cef000) " @ str pc, [r14]")
+
+ TEST_GROUP("Advanced SIMD element or structure load/store instructions")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf9000000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf92fffff) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf9800000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf9efffff) "")
+
+ TEST_GROUP("Load single data item and memory hints")
+
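+/*
+ * As SINGLE_STORE above, but for loads: each width with immediate,
+ * post-indexed, pre-indexed, register-offset and literal (pc-relative)
+ * addressing, plus the unprivileged ldr<size>t form which must be
+ * rejected.
+ */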
+#define SINGLE_LOAD(size) \
+ TEST_P( "ldr"size" r0, [r",11,-1024, ", #1024]") \
+ TEST_P( "ldr"size" r14, [r",1, -1024,", #1080]") \
+ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]") \
+ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]") \
+ TEST_P( "ldr"size" r0, [r",11,24, "], #120") \
+ TEST_P( "ldr"size" r14, [r",1, 24, "], #128") \
+ TEST_P( "ldr"size" r0, [r",11,24, "], #-120") \
+ TEST_P( "ldr"size" r14, [r",1,24, "], #-128") \
+ TEST_P( "ldr"size" r0, [r",11,24, ", #120]!") \
+ TEST_P( "ldr"size" r14, [r",1, 24, ", #128]!") \
+ TEST_P( "ldr"size" r0, [r",11,256, ", #-120]!") \
+ TEST_P( "ldr"size" r14, [r",1, 256, ", #-128]!") \
+ TEST_PR("ldr"size".w r0, [r",1, 0,", r",2, 4,"]") \
+ TEST_PR("ldr"size" r14, [r",10,0,", r",11,4,", lsl #1]") \
+ TEST_X( "ldr"size".w r0, 3f", \
+ ".align 3 \n\t" \
+ "3: .word "__stringify(VAL1)) \
+ TEST_X( "ldr"size".w r14, 3f", \
+ ".align 3 \n\t" \
+ "3: .word "__stringify(VAL2)) \
+ TEST( "ldr"size".w r7, 3b") \
+ TEST( "ldr"size".w r7, [sp, #24]") \
+ TEST_P( "ldr"size".w r0, [r",0,0, "]") \
+ TEST_UNSUPPORTED("ldr"size"t r0, [r1, #4]")
+
+ SINGLE_LOAD("b")
+ SINGLE_LOAD("sb")
+ SINGLE_LOAD("h")
+ SINGLE_LOAD("sh")
+ SINGLE_LOAD("")
+
+ TEST_BF_P("ldr pc, [r",14, 15*4,"]")
+ TEST_P( "ldr sp, [r",14, 13*4,"]")
+ TEST_BF_R("ldr pc, [sp, r",14, 15*4,"]")
+ TEST_R( "ldr sp, [sp, r",14, 13*4,"]")
+ TEST_THUMB_TO_ARM_INTERWORK_P("ldr pc, [r",0,0,", #15*4]")
+ TEST_SUPPORTED("ldr sp, 99f")
+ TEST_SUPPORTED("ldr pc, 99f")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf854700d) " @ ldr r7, [r4, sp]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf854700f) " @ ldr r7, [r4, pc]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf814700d) " @ ldrb r7, [r4, sp]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf814700f) " @ ldrb r7, [r4, pc]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf89fd004) " @ ldrb sp, 99f")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf814d008) " @ ldrb sp, [r4, r8]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf894d000) " @ ldrb sp, [r4]")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xf8600000) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xf9ffffff) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xf9500000) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xf95fffff) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xf8000800) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xf97ffaff) "") /* Unallocated space */
+
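+ /* Preload hints: pld/pldw/pli in literal, immediate and register-offset forms */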
+ TEST( "pli [pc, #4]")
+ TEST( "pli [pc, #-4]")
+ TEST( "pld [pc, #4]")
+ TEST( "pld [pc, #-4]")
+
+ TEST_P( "pld [r",0,-1024,", #1024]")
+ TEST( __inst_thumb32(0xf8b0f400) " @ pldw [r0, #1024]")
+ TEST_P( "pli [r",4, 0b,", #1024]")
+ TEST_P( "pld [r",7, 120,", #-120]")
+ TEST( __inst_thumb32(0xf837fc78) " @ pldw [r7, #-120]")
+ TEST_P( "pli [r",11,120,", #-120]")
+ TEST( "pld [sp, #0]")
+
+ TEST_PR("pld [r",7, 24, ", r",0, 16,"]")
+ TEST_PR("pld [r",8, 24, ", r",12,16,", lsl #3]")
+ TEST_SUPPORTED(__inst_thumb32(0xf837f000) " @ pldw [r7, r0]")
+ TEST_SUPPORTED(__inst_thumb32(0xf838f03c) " @ pldw [r8, r12, lsl #3]")
+ TEST_RR("pli [r",12,0b,", r",0, 16,"]")
+ TEST_RR("pli [r",0, 0b,", r",12,16,", lsl #3]")
+ TEST_R( "pld [sp, r",1, 16,"]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf817f00d) " @ pld [r7, sp]")
+ TEST_UNSUPPORTED(__inst_thumb32(0xf817f00f) " @ pld [r7, pc]")
+
+ TEST_GROUP("Data-processing (register)")
+
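+/*
+ * Register-controlled shifts; instantiated below for both the plain
+ * and the flag-setting ("s") mnemonics.
+ */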
+#define SHIFTS32(op) \
+ TEST_RR(op" r0, r",1, VAL1,", r",2, 3, "") \
+ TEST_RR(op" r14, r",12,VAL2,", r",11,10,"")
+
+ SHIFTS32("lsl")
+ SHIFTS32("lsls")
+ SHIFTS32("lsr")
+ SHIFTS32("lsrs")
+ SHIFTS32("asr")
+ SHIFTS32("asrs")
+ SHIFTS32("ror")
+ SHIFTS32("rors")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa01ff02) " @ lsl pc, r1, r2")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa01fd02) " @ lsl sp, r1, r2")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff002) " @ lsl r0, pc, r2")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0df002) " @ lsl r0, sp, r2")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00f) " @ lsl r0, r1, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa01f00d) " @ lsl r0, r1, sp")
+
+ TEST_RR( "sxtah r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxth r8, r",7, HH1,"")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0fff87) " @ sxth pc, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0ffd87) " @ sxth sp, r7")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88f) " @ sxth r8, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa0ff88d) " @ sxth r8, sp")
+
+ TEST_RR( "uxtah r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtah r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxth r8, r",7, HH1,"")
+
+ TEST_RR( "sxtab16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxtb16 r8, r",7, HH1,"")
+
+ TEST_RR( "uxtab16 r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtab16 r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxtb16 r8, r",7, HH1,"")
+
+ TEST_RR( "sxtab r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "sxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "sxtb r8, r",7, HH1,"")
+
+ TEST_RR( "uxtab r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "uxtab r14,r",12, HH2,", r",10,HH1,", ror #8")
+ TEST_R( "uxtb r8, r",7, HH1,"")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa6000f0) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa7fffff) "")
+
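+/*
+ * Generate the six parallel add/subtract variants (add16, asx, sax,
+ * sub16, add8, sub8) for a given prefix: the s/q/sh signed and
+ * u/uq/uh unsigned forms below.
+ */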
+#define PARALLEL_ADD_SUB(op) \
+ TEST_RR( op"add16 r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"add16 r14, r",12,HH2,", r",10,HH1,"") \
+ TEST_RR( op"asx r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"asx r14, r",12,HH2,", r",10,HH1,"") \
+ TEST_RR( op"sax r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"sax r14, r",12,HH2,", r",10,HH1,"") \
+ TEST_RR( op"sub16 r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"sub16 r14, r",12,HH2,", r",10,HH1,"") \
+ TEST_RR( op"add8 r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"add8 r14, r",12,HH2,", r",10,HH1,"") \
+ TEST_RR( op"sub8 r0, r",0, HH1,", r",1, HH2,"") \
+ TEST_RR( op"sub8 r14, r",12,HH2,", r",10,HH1,"")
+
+ TEST_GROUP("Parallel addition and subtraction, signed")
+
+ PARALLEL_ADD_SUB("s")
+ PARALLEL_ADD_SUB("q")
+ PARALLEL_ADD_SUB("sh")
+
+ TEST_GROUP("Parallel addition and subtraction, unsigned")
+
+ PARALLEL_ADD_SUB("u")
+ PARALLEL_ADD_SUB("uq")
+ PARALLEL_ADD_SUB("uh")
+
+ TEST_GROUP("Miscellaneous operations")
+
+ TEST_RR("qadd r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR("qadd lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_RR("qsub r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR("qsub lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_RR("qdadd r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR("qdadd lr, r",9, VAL2,", r",8, VAL1,"")
+ TEST_RR("qdsub r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR("qdsub lr, r",9, VAL2,", r",8, VAL1,"")
+
+ TEST_R("rev.w r0, r",0, VAL1,"")
+ TEST_R("rev r14, r",12, VAL2,"")
+ TEST_R("rev16.w r0, r",0, VAL1,"")
+ TEST_R("rev16 r14, r",12, VAL2,"")
+ TEST_R("rbit r0, r",0, VAL1,"")
+ TEST_R("rbit r14, r",12, VAL2,"")
+ TEST_R("revsh.w r0, r",0, VAL1,"")
+ TEST_R("revsh r14, r",12, VAL2,"")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa9cff8c) " @ rev pc, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa9cfd8c) " @ rev sp, r12")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa9ffe8f) " @ rev r14, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa9dfe8d) " @ rev r14, sp")
+
+ TEST_RR("sel r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR("sel r14, r",12,VAL1,", r",10, VAL2,"")
+
+ TEST_R("clz r0, r",0, 0x0,"")
+ TEST_R("clz r7, r",14,0x1,"")
+ TEST_R("clz lr, r",7, 0xffffffff,"")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfa80f030) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfab0f000) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfaffff7f) "") /* Unallocated space */
+
+ TEST_GROUP("Multiply, multiply accumulate, and absolute difference operations")
+
+ TEST_RR( "mul r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "mul r7, r",8, VAL2,", r",9, VAL2,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08ff09) " @ mul pc, r8, r9")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08fd09) " @ mul sp, r8, r9")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb0ff709) " @ mul r7, pc, r9")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb0df709) " @ mul r7, sp, r9")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70f) " @ mul r7, r8, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08f70d) " @ mul r7, r8, sp")
+
+ TEST_RRR( "mla r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "mla r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08af09) " @ mla pc, r8, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08ad09) " @ mla sp, r8, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb0fa709) " @ mla r7, pc, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb0da709) " @ mla r7, sp, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70f) " @ mla r7, r8, pc, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08a70d) " @ mla r7, r8, sp, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb08d709) " @ mla r7, r8, r9, sp")
+
+ TEST_RRR( "mls r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "mls r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+
+ TEST_RRR( "smlabb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlabb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RRR( "smlatb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlatb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RRR( "smlabt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlabt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RRR( "smlatt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlatt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smulbb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulbb r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_RR( "smultb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smultb r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_RR( "smulbt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulbt r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_RR( "smultt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smultt r7, r",8, VAL3,", r",9, VAL1,"")
+
+ TEST_RRR( "smlad r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlad r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_RRR( "smladx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smladx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_RR( "smuad r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smuad r14, r",12,HH2,", r",10,HH1,"")
+ TEST_RR( "smuadx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smuadx r14, r",12,HH2,", r",10,HH1,"")
+
+ TEST_RRR( "smlawb r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlawb r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RRR( "smlawt r0, r",1, VAL1,", r",2, VAL2,", r",3, VAL3,"")
+ TEST_RRR( "smlawt r7, r",8, VAL3,", r",9, VAL1,", r",10, VAL2,"")
+ TEST_RR( "smulwb r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulwb r7, r",8, VAL3,", r",9, VAL1,"")
+ TEST_RR( "smulwt r0, r",1, VAL1,", r",2, VAL2,"")
+ TEST_RR( "smulwt r7, r",8, VAL3,", r",9, VAL1,"")
+
+ TEST_RRR( "smlsd r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlsd r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_RRR( "smlsdx r0, r",0, HH1,", r",1, HH2,", r",2, VAL1,"")
+ TEST_RRR( "smlsdx r14, r",12,HH2,", r",10,HH1,", r",8, VAL2,"")
+ TEST_RR( "smusd r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smusd r14, r",12,HH2,", r",10,HH1,"")
+ TEST_RR( "smusdx r0, r",0, HH1,", r",1, HH2,"")
+ TEST_RR( "smusdx r14, r",12,HH2,", r",10,HH1,"")
+
+ TEST_RRR( "smmla r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmla r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_RRR( "smmlar r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmlar r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_RR( "smmul r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "smmul r14, r",12,VAL2,", r",10,VAL1,"")
+ TEST_RR( "smmulr r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "smmulr r14, r",12,VAL2,", r",10,VAL1,"")
+
+ TEST_RRR( "smmls r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmls r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+ TEST_RRR( "smmlsr r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL1,"")
+ TEST_RRR( "smmlsr r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL2,"")
+
+ TEST_RRR( "usada8 r0, r",0, VAL1,", r",1, VAL2,", r",2, VAL3,"")
+ TEST_RRR( "usada8 r14, r",12,VAL2,", r",10,VAL1,", r",8, VAL3,"")
+ TEST_RR( "usad8 r0, r",0, VAL1,", r",1, VAL2,"")
+ TEST_RR( "usad8 r14, r",12,VAL2,", r",10,VAL1,"")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb00f010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb0fff1f) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb70f010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb700010) "") /* Unallocated space */
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb7fff1f) "") /* Unallocated space */
+
+ TEST_GROUP("Long multiply, long multiply accumulate, and divide")
+
+ TEST_RR( "smull r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "smull r7, r8, r",9, VAL2,", r",10, VAL1,"")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb89f80a) " @ smull pc, r8, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb89d80a) " @ smull sp, r8, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb897f0a) " @ smull r7, pc, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb897d0a) " @ smull r7, sp, r9, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb8f780a) " @ smull r7, r8, pc, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb8d780a) " @ smull r7, r8, sp, r10")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb89780f) " @ smull r7, r8, r9, pc")
+ TEST_UNSUPPORTED(__inst_thumb32(0xfb89780d) " @ smull r7, r8, r9, sp")
+
+ TEST_RR( "umull r0, r1, r",2, VAL1,", r",3, VAL2,"")
+ TEST_RR( "umull r7, r8, r",9, VAL2,", r",10, VAL1,"")
+
+ TEST_RRRR( "smlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+ TEST_RRRR( "smlalbb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalbb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "smlalbt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlalbt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "smlaltb r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlaltb r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "smlaltt r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "smlaltt r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+ TEST_RRRR( "smlald r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlald r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+ TEST_RRRR( "smlaldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlaldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+
+ TEST_RRRR( "smlsld r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlsld r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+ TEST_RRRR( "smlsldx r",0, VAL1,", r",1, VAL2, ", r",0, HH1,", r",1, HH2)
+ TEST_RRRR( "smlsldx r",11,VAL2,", r",10,VAL1, ", r",9, HH2,", r",8, HH1)
+
+ TEST_RRRR( "umlal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "umlal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+ TEST_RRRR( "umaal r",0, VAL1,", r",1, VAL2,", r",2, VAL3,", r",3, VAL4)
+ TEST_RRRR( "umaal r",8, VAL4,", r",9, VAL1,", r",10,VAL2,", r",11,VAL3)
+
+ TEST_GROUP("Coprocessor instructions")
+
+ TEST_UNSUPPORTED(__inst_thumb32(0xfc000000) "")
+ TEST_UNSUPPORTED(__inst_thumb32(0xffffffff) "")
+
+ TEST_GROUP("Testing instructions in IT blocks")
+
+ TEST_ITBLOCK("sub.w r0, r0")
+
+ verbose("\n");
+}
+