Diffstat
-rw-r--r--  arch/mips/net/Makefile                    |    4
-rw-r--r--  arch/mips/net/bpf_jit.c                   | 1299
-rw-r--r--  arch/mips/net/bpf_jit.h                   |   84
-rw-r--r--  arch/mips/net/bpf_jit_asm.S               |  285
-rw-r--r--  arch/mips/net/ebpf_jit.c                  | 1849
-rw-r--r--  arch/mips/netlogic/Kconfig                |   86
-rw-r--r--  arch/mips/netlogic/Makefile               |    3
-rw-r--r--  arch/mips/netlogic/Platform               |   17
-rw-r--r--  arch/mips/netlogic/common/Makefile        |    5
-rw-r--r--  arch/mips/netlogic/common/earlycons.c     |   63
-rw-r--r--  arch/mips/netlogic/common/irq.c           |  354
-rw-r--r--  arch/mips/netlogic/common/reset.S         |  299
-rw-r--r--  arch/mips/netlogic/common/smp.c           |  285
-rw-r--r--  arch/mips/netlogic/common/smpboot.S       |  141
-rw-r--r--  arch/mips/netlogic/common/time.c          |  110
-rw-r--r--  arch/mips/netlogic/xlp/Makefile           |   11
-rw-r--r--  arch/mips/netlogic/xlp/ahci-init-xlp2.c   |  390
-rw-r--r--  arch/mips/netlogic/xlp/ahci-init.c        |  209
-rw-r--r--  arch/mips/netlogic/xlp/cop2-ex.c          |  121
-rw-r--r--  arch/mips/netlogic/xlp/dt.c               |   95
-rw-r--r--  arch/mips/netlogic/xlp/nlm_hal.c          |  508
-rw-r--r--  arch/mips/netlogic/xlp/setup.c            |  179
-rw-r--r--  arch/mips/netlogic/xlp/usb-init-xlp2.c    |  288
-rw-r--r--  arch/mips/netlogic/xlp/usb-init.c         |  149
-rw-r--r--  arch/mips/netlogic/xlp/wakeup.c           |  212
-rw-r--r--  arch/mips/netlogic/xlr/Makefile           |    2
-rw-r--r--  arch/mips/netlogic/xlr/fmn-config.c       |  293
-rw-r--r--  arch/mips/netlogic/xlr/fmn.c              |  204
-rw-r--r--  arch/mips/netlogic/xlr/platform-flash.c   |  217
-rw-r--r--  arch/mips/netlogic/xlr/platform.c         |  250
-rw-r--r--  arch/mips/netlogic/xlr/setup.c            |  210
-rw-r--r--  arch/mips/netlogic/xlr/wakeup.c           |   85
32 files changed, 8307 insertions(+), 0 deletions(-)
diff --git a/arch/mips/net/Makefile b/arch/mips/net/Makefile
new file mode 100644
index 000000000..47d678416
--- /dev/null
+++ b/arch/mips/net/Makefile
@@ -0,0 +1,4 @@
+# MIPS networking code
+
+obj-$(CONFIG_MIPS_CBPF_JIT) += bpf_jit.o bpf_jit_asm.o
+obj-$(CONFIG_MIPS_EBPF_JIT) += ebpf_jit.o
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
new file mode 100644
index 000000000..43e6597c7
--- /dev/null
+++ b/arch/mips/net/bpf_jit.c
@@ -0,0 +1,1299 @@
+/*
+ * Just-In-Time compiler for BPF filters on MIPS
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/if_vlan.h>
+#include <linux/moduleloader.h>
+#include <linux/netdevice.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <asm/asm.h>
+#include <asm/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+#include "bpf_jit.h"
+
+/* ABI
+ * r_skb_hl SKB header length
+ * r_data SKB data pointer
+ * r_off Offset
+ * r_A BPF register A
+ * r_X BPF register X
+ * r_skb *skb
+ * r_M *scratch memory
+ * r_skb_len SKB length
+ *
+ * On entry (*bpf_func)(*skb, *filter)
+ * a0 = MIPS_R_A0 = skb;
+ * a1 = MIPS_R_A1 = filter;
+ *
+ * Stack
+ * ...
+ * M[15]
+ * M[14]
+ * M[13]
+ * ...
+ * M[0] <-- r_M
+ * saved reg k-1
+ * saved reg k-2
+ * ...
+ * saved reg 0 <-- r_sp
+ * <no argument area>
+ *
+ * Packet layout
+ *
+ * <--------------------- len ------------------------>
+ * <--skb_headlen(r_skb_hl)-->< --- skb->data_len --->
+ * ----------------------------------------------------
+ * | skb->data |
+ * ----------------------------------------------------
+ */
+
+#define ptr typeof(unsigned long)
+
+#define SCRATCH_OFF(k) (4 * (k))
+
+/* JIT flags */
+#define SEEN_CALL (1 << BPF_MEMWORDS)
+#define SEEN_SREG_SFT (BPF_MEMWORDS + 1)
+#define SEEN_SREG_BASE (1 << SEEN_SREG_SFT)
+#define SEEN_SREG(x) (SEEN_SREG_BASE << (x))
+#define SEEN_OFF SEEN_SREG(2)
+#define SEEN_A SEEN_SREG(3)
+#define SEEN_X SEEN_SREG(4)
+#define SEEN_SKB SEEN_SREG(5)
+#define SEEN_MEM SEEN_SREG(6)
+/* SEEN_SKB_DATA also implies skb_hl and skb_len */
+#define SEEN_SKB_DATA (SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
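+/*
+ * e.g. SEEN_A == SEEN_SREG(3): a filter that uses register A causes
+ * $s3 (r_A) to be saved and restored by the prologue/epilogue.
+ */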
+
+/* Arguments used by JIT */
+#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
+
+#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @prologue_bytes: Number of bytes for prologue
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ unsigned int prologue_bytes;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+};
+
+
+static inline int optimize_div(u32 *k)
+{
+ /* power of 2 divides can be implemented with right shift */
+ if (!(*k & (*k-1))) {
+ *k = ilog2(*k);
+ return 1;
+ }
+
+ return 0;
+}
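+
+/*
+ * e.g. "A /= 8": 8 is a power of two, so *k becomes ilog2(8) == 3 and
+ * the JIT emits a single "srl r_A, r_A, 3" instead of divu/mflo.
+ */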
+
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ uasm_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+/*
+ * Similar to emit_instr but it must be used when we need to emit
+ * 32-bit or 64-bit instructions
+ */
+#define emit_long_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ UASM_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+/* Determine if immediate is within the 16-bit signed range */
+static inline bool is_range16(s32 imm)
+{
+ return !(imm >= SBIT(15) || imm < -SBIT(15));
+}
+
+static inline void emit_addu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, addu, dst, src1, src2);
+}
+
+static inline void emit_nop(struct jit_ctx *ctx)
+{
+ emit_instr(ctx, nop);
+}
+
+/* Load a u32 immediate to a register */
+static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ /* addiu can only handle s16 */
+ if (!is_range16(imm)) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
+ } else {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_addiu(&p, dst, r_zero, imm);
+ }
+ }
+ ctx->idx++;
+
+ if (!is_range16(imm))
+ ctx->idx++;
+}
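+
+/*
+ * e.g. emit_load_imm(r_A, 0x12345678, ctx) does not fit in a signed
+ * 16-bit immediate, so it expands to:
+ *	lui	r_tmp_imm, 0x1234
+ *	ori	r_A, r_tmp_imm, 0x5678
+ * while emit_load_imm(r_A, -4, ctx) is a single "addiu r_A, $zero, -4".
+ */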
+
+static inline void emit_or(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, or, dst, src1, src2);
+}
+
+static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
+ struct jit_ctx *ctx)
+{
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_or(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, ori, dst, src, imm);
+ }
+}
+
+static inline void emit_daddiu(unsigned int dst, unsigned int src,
+ int imm, struct jit_ctx *ctx)
+{
+ /*
+ * Only used for stack, so the imm is relatively small
+ * and it fits in 15-bits
+ */
+ emit_instr(ctx, daddiu, dst, src, imm);
+}
+
+static inline void emit_addiu(unsigned int dst, unsigned int src,
+ u32 imm, struct jit_ctx *ctx)
+{
+ if (!is_range16(imm)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_addu(dst, r_tmp, src, ctx);
+ } else {
+ emit_instr(ctx, addiu, dst, src, imm);
+ }
+}
+
+static inline void emit_and(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, and, dst, src1, src2);
+}
+
+static inline void emit_andi(unsigned int dst, unsigned int src,
+ u32 imm, struct jit_ctx *ctx)
+{
+ /* If imm does not fit in u16 then load it to register */
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_and(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, andi, dst, src, imm);
+ }
+}
+
+static inline void emit_xor(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, xor, dst, src1, src2);
+}
+
+static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
+{
+ /* If imm does not fit in u16 then load it to register */
+ if (imm >= BIT(16)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_xor(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, xori, dst, src, imm);
+ }
+}
+
+static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
+}
+
+static inline void emit_subu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, subu, dst, src1, src2);
+}
+
+static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
+{
+ emit_subu(reg, r_zero, reg, ctx);
+}
+
+static inline void emit_sllv(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sllv, dst, src, sa);
+}
+
+static inline void emit_sll(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ /* sa is 5-bits long */
+ if (sa >= BIT(5))
+ /* Shifting >= 32 results in zero */
+ emit_jit_reg_move(dst, r_zero, ctx);
+ else
+ emit_instr(ctx, sll, dst, src, sa);
+}
+
+static inline void emit_srlv(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, srlv, dst, src, sa);
+}
+
+static inline void emit_srl(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ /* sa is 5-bits long */
+ if (sa >= BIT(5))
+ /* Shifting >= 32 results in zero */
+ emit_jit_reg_move(dst, r_zero, ctx);
+ else
+ emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, slt, dst, src1, src2);
+}
+
+static inline void emit_sltu(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sltu, dst, src1, src2);
+}
+
+static inline void emit_sltiu(unsigned dst, unsigned int src,
+ unsigned int imm, struct jit_ctx *ctx)
+{
+ /* 16 bit immediate */
+ if (!is_range16((s32)imm)) {
+ emit_load_imm(r_tmp, imm, ctx);
+ emit_sltu(dst, src, r_tmp, ctx);
+ } else {
+ emit_instr(ctx, sltiu, dst, src, imm);
+ }
+
+}
+
+/* Store register on the stack */
+static inline void emit_store_stack_reg(ptr reg, ptr base,
+ unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, SW, reg, offset, base);
+}
+
+static inline void emit_store(ptr reg, ptr base, unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, sw, reg, offset, base);
+}
+
+static inline void emit_load_stack_reg(ptr reg, ptr base,
+ unsigned int offset,
+ struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, LW, reg, offset, base);
+}
+
+static inline void emit_load(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lw, reg, offset, base);
+}
+
+static inline void emit_load_byte(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lb, reg, offset, base);
+}
+
+static inline void emit_half_load(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lh, reg, offset, base);
+}
+
+static inline void emit_half_load_unsigned(unsigned int reg, unsigned int base,
+ unsigned int offset, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, lhu, reg, offset, base);
+}
+
+static inline void emit_mul(unsigned int dst, unsigned int src1,
+ unsigned int src2, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, mul, dst, src1, src2);
+}
+
+static inline void emit_div(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_divu(&p, dst, src);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_mflo(&p, dst);
+ }
+ ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_mod(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+ uasm_i_divu(&p, dst, src);
+ p = &ctx->target[ctx->idx + 1];
+ uasm_i_mfhi(&p, dst);
+ }
+ ctx->idx += 2; /* 2 insts */
+}
+
+static inline void emit_dsll(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, dsll, dst, src, sa);
+}
+
+static inline void emit_dsrl32(unsigned int dst, unsigned int src,
+ unsigned int sa, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, dsrl32, dst, src, sa);
+}
+
+static inline void emit_wsbh(unsigned int dst, unsigned int src,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, wsbh, dst, src);
+}
+
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+ int imm, struct jit_ctx *ctx)
+{
+	/* src contains the base address of the 32/64-bit pointer */
+ emit_long_instr(ctx, LW, dst, imm, src);
+}
+
+/* load a function pointer to register */
+static inline void emit_load_func(unsigned int reg, ptr imm,
+ struct jit_ctx *ctx)
+{
+ if (IS_ENABLED(CONFIG_64BIT)) {
+ /* At this point imm is always 64-bit */
+ emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+ emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
+ emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
+ emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
+ } else {
+ emit_load_imm(reg, imm, ctx);
+ }
+}
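+
+/*
+ * e.g. loading the (hypothetical) kernel address 0xffffffff80112233
+ * on a 64-bit kernel expands to:
+ *	addiu	r_tmp, $zero, -1	# upper 32 bits: 0xffffffff
+ *	dsll	r_tmp_imm, r_tmp, 16
+ *	ori	r_tmp, r_tmp_imm, 0x8011
+ *	dsll	r_tmp_imm, r_tmp, 16
+ *	ori	reg, r_tmp_imm, 0x2233
+ */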
+
+/* Move to real MIPS register */
+static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+ emit_long_instr(ctx, ADDU, dst, src, r_zero);
+}
+
+/* Move to JIT (32-bit) register */
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
+{
+ emit_addu(dst, src, r_zero, ctx);
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (ctx->target == NULL)
+ return 0;
+
+ /*
+ * We want a pc-relative branch. We only do forward branches
+ * so tgt is always after pc. tgt is the instruction offset
+	 * we want to jump to.
+	 *
+	 * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
+ */
+ return ctx->offsets[tgt] -
+ (ctx->idx * 4 - ctx->prologue_bytes) - 4;
+}
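+
+/*
+ * e.g. a branch at body offset 40 (ctx->idx * 4 - ctx->prologue_bytes)
+ * targeting ctx->offsets[tgt] == 64 gets an immediate of
+ * 64 - 40 - 4 == 20 bytes, i.e. five instructions ahead of the
+ * delay slot.
+ */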
+
+static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
+ unsigned int imm, struct jit_ctx *ctx)
+{
+ if (ctx->target != NULL) {
+ u32 *p = &ctx->target[ctx->idx];
+
+ switch (cond) {
+ case MIPS_COND_EQ:
+ uasm_i_beq(&p, reg1, reg2, imm);
+ break;
+ case MIPS_COND_NE:
+ uasm_i_bne(&p, reg1, reg2, imm);
+ break;
+ case MIPS_COND_ALL:
+ uasm_i_b(&p, imm);
+ break;
+ default:
+ pr_warn("%s: Unhandled branch conditional: %d\n",
+ __func__, cond);
+ }
+ }
+ ctx->idx++;
+}
+
+static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
+{
+ emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
+}
+
+static inline void emit_jalr(unsigned int link, unsigned int reg,
+ struct jit_ctx *ctx)
+{
+ emit_instr(ctx, jalr, link, reg);
+}
+
+static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
+{
+ emit_instr(ctx, jr, reg);
+}
+
+static inline u16 align_sp(unsigned int num)
+{
+ /* Double word alignment for 32-bit, quadword for 64-bit */
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
+ num = (num + (align - 1)) & -align;
+ return num;
+}
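+
+/*
+ * e.g. align_sp(20) returns 24 on a 32-bit kernel (8-byte alignment)
+ * and 32 on a 64-bit kernel (16-byte alignment).
+ */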
+
+static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
+{
+ int i = 0, real_off = 0;
+ u32 sflags, tmp_flags;
+
+ /* Adjust the stack pointer */
+ if (offset)
+ emit_stack_offset(-align_sp(offset), ctx);
+
+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+ /* sflags is essentially a bitmap */
+ while (tmp_flags) {
+ if ((sflags >> i) & 0x1) {
+ emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+ ctx);
+ real_off += SZREG;
+ }
+ i++;
+ tmp_flags >>= 1;
+ }
+
+ /* save return address */
+ if (ctx->flags & SEEN_CALL) {
+ emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
+ real_off += SZREG;
+ }
+
+ /* Setup r_M leaving the alignment gap if necessary */
+ if (ctx->flags & SEEN_MEM) {
+ if (real_off % (SZREG * 2))
+ real_off += SZREG;
+ emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
+ }
+}
+
+static void restore_bpf_jit_regs(struct jit_ctx *ctx,
+ unsigned int offset)
+{
+ int i, real_off = 0;
+ u32 sflags, tmp_flags;
+
+ tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
+ /* sflags is a bitmap */
+ i = 0;
+ while (tmp_flags) {
+ if ((sflags >> i) & 0x1) {
+ emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
+ ctx);
+ real_off += SZREG;
+ }
+ i++;
+ tmp_flags >>= 1;
+ }
+
+ /* restore return address */
+ if (ctx->flags & SEEN_CALL)
+ emit_load_stack_reg(r_ra, r_sp, real_off, ctx);
+
+	/* Restore the sp and discard the scratch memory */
+ if (offset)
+ emit_stack_offset(align_sp(offset), ctx);
+}
+
+static unsigned int get_stack_depth(struct jit_ctx *ctx)
+{
+ int sp_off = 0;
+
+	/* How many s* regs do we need to preserve? */
+ sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;
+
+ if (ctx->flags & SEEN_MEM)
+ sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */
+
+ if (ctx->flags & SEEN_CALL)
+ sp_off += SZREG; /* Space for our ra register */
+
+ return sp_off;
+}
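+
+/*
+ * e.g. on a 32-bit kernel (SZREG == 4), a filter with
+ * SEEN_A | SEEN_X | SEEN_MEM | SEEN_CALL preserves $s3, $s4 and $s6
+ * (12 bytes) and needs 64 bytes of scratch memory plus 4 bytes for
+ * $ra, i.e. 80 bytes in total.
+ */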
+
+static void build_prologue(struct jit_ctx *ctx)
+{
+ int sp_off;
+
+ /* Calculate the total offset for the stack pointer */
+ sp_off = get_stack_depth(ctx);
+ save_bpf_jit_regs(ctx, sp_off);
+
+ if (ctx->flags & SEEN_SKB)
+ emit_reg_move(r_skb, MIPS_R_A0, ctx);
+
+ if (ctx->flags & SEEN_SKB_DATA) {
+ /* Load packet length */
+ emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
+ ctx);
+ emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
+ ctx);
+ /* Load the data pointer */
+ emit_load_ptr(r_skb_data, r_skb,
+ offsetof(struct sk_buff, data), ctx);
+ /* Load the header length */
+ emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
+ }
+
+ if (ctx->flags & SEEN_X)
+ emit_jit_reg_move(r_X, r_zero, ctx);
+
+ /*
+ * Do not leak kernel data to userspace, we only need to clear
+ * r_A if it is ever used. In fact if it is never used, we
+ * will not save/restore it, so clearing it in this case would
+ * corrupt the state of the caller.
+ */
+ if (bpf_needs_clear_a(&ctx->skf->insns[0]) &&
+ (ctx->flags & SEEN_A))
+ emit_jit_reg_move(r_A, r_zero, ctx);
+}
+
+static void build_epilogue(struct jit_ctx *ctx)
+{
+ unsigned int sp_off;
+
+ /* Calculate the total offset for the stack pointer */
+
+ sp_off = get_stack_depth(ctx);
+ restore_bpf_jit_regs(ctx, sp_off);
+
+ /* Return */
+ emit_jr(r_ra, ctx);
+ emit_nop(ctx);
+}
+
+#define CHOOSE_LOAD_FUNC(K, func) \
+ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
+ func##_positive)
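+
+/*
+ * e.g. for BPF_LD | BPF_W | BPF_ABS with k == SKF_LL_OFF + 14 the
+ * macro picks sk_load_word_negative, while k == 14 picks
+ * sk_load_word_positive; anything below SKF_LL_OFF falls back to the
+ * generic sk_load_word.
+ */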
+
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+static int build_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct sock_filter *inst;
+ unsigned int i, off, condt;
+ u32 k, b_off __maybe_unused;
+ u8 (*sk_load_func)(unsigned long *skb, int offset);
+
+ for (i = 0; i < prog->len; i++) {
+ u16 code;
+
+ inst = &(prog->insns[i]);
+ pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
+ __func__, inst->code, inst->jt, inst->jf, inst->k);
+ k = inst->k;
+ code = bpf_anc_helper(inst);
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ switch (code) {
+ case BPF_LD | BPF_IMM:
+ /* A <- k ==> li r_A, k */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_A, k, ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_LEN:
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
+ /* A <- len ==> lw r_A, offset(skb) */
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ off = offsetof(struct sk_buff, len);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_LD | BPF_MEM:
+ /* A <- M[k] ==> lw r_A, offset(M) */
+ ctx->flags |= SEEN_MEM | SEEN_A;
+ emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_ABS:
+ /* A <- P[k:4] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
+ goto load;
+ case BPF_LD | BPF_H | BPF_ABS:
+ /* A <- P[k:2] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
+ goto load;
+ case BPF_LD | BPF_B | BPF_ABS:
+ /* A <- P[k:1] */
+ sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
+load:
+ emit_load_imm(r_off, k, ctx);
+load_common:
+ ctx->flags |= SEEN_CALL | SEEN_OFF |
+ SEEN_SKB | SEEN_A | SEEN_SKB_DATA;
+
+ emit_load_func(r_s0, (ptr)sk_load_func, ctx);
+ emit_reg_move(MIPS_R_A0, r_skb, ctx);
+ emit_jalr(MIPS_R_RA, r_s0, ctx);
+ /* Load second argument to delay slot */
+ emit_reg_move(MIPS_R_A1, r_off, ctx);
+ /* Check the error value */
+ emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
+ ctx);
+ /* Load return register on DS for failures */
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* Return with error */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_LD | BPF_W | BPF_IND:
+ /* A <- P[X + k:4] */
+ sk_load_func = sk_load_word;
+ goto load_ind;
+ case BPF_LD | BPF_H | BPF_IND:
+ /* A <- P[X + k:2] */
+ sk_load_func = sk_load_half;
+ goto load_ind;
+ case BPF_LD | BPF_B | BPF_IND:
+ /* A <- P[X + k:1] */
+ sk_load_func = sk_load_byte;
+load_ind:
+ ctx->flags |= SEEN_OFF | SEEN_X;
+ emit_addiu(r_off, r_X, k, ctx);
+ goto load_common;
+ case BPF_LDX | BPF_IMM:
+ /* X <- k */
+ ctx->flags |= SEEN_X;
+ emit_load_imm(r_X, k, ctx);
+ break;
+ case BPF_LDX | BPF_MEM:
+ /* X <- M[k] */
+ ctx->flags |= SEEN_X | SEEN_MEM;
+ emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_LDX | BPF_W | BPF_LEN:
+ /* X <- len */
+ ctx->flags |= SEEN_X | SEEN_SKB;
+ off = offsetof(struct sk_buff, len);
+ emit_load(r_X, r_skb, off, ctx);
+ break;
+ case BPF_LDX | BPF_B | BPF_MSH:
+ /* X <- 4 * (P[k:1] & 0xf) */
+ ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
+ /* Load offset to a1 */
+ emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
+ /*
+ * This may emit two instructions so it may not fit
+ * in the delay slot. So use a0 in the delay slot.
+ */
+ emit_load_imm(MIPS_R_A1, k, ctx);
+ emit_jalr(MIPS_R_RA, r_s0, ctx);
+ emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
+ /* Check the error value */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_NE, r_ret, 0, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ /* We are good */
+			/* X <- P[k:1] & 0xf */
+ emit_andi(r_X, r_A, 0xf, ctx);
+ /* X << 2 */
+ emit_b(b_imm(i + 1, ctx), ctx);
+ emit_sll(r_X, r_X, 2, ctx); /* delay slot */
+ break;
+ case BPF_ST:
+ /* M[k] <- A */
+ ctx->flags |= SEEN_MEM | SEEN_A;
+ emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_STX:
+ /* M[k] <- X */
+ ctx->flags |= SEEN_MEM | SEEN_X;
+ emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_K:
+ /* A += K */
+ ctx->flags |= SEEN_A;
+ emit_addiu(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_ADD | BPF_X:
+ /* A += X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_addu(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ /* A -= K */
+ ctx->flags |= SEEN_A;
+ emit_addiu(r_A, r_A, -k, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ /* A -= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_subu(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ /* A *= K */
+ /* Load K to scratch register before MUL */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_mul(r_A, r_A, r_s0, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ /* A *= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_mul(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ /* A /= k */
+ if (k == 1)
+ break;
+ if (optimize_div(&k)) {
+ ctx->flags |= SEEN_A;
+ emit_srl(r_A, r_A, k, ctx);
+ break;
+ }
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_div(r_A, r_s0, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ /* A %= k */
+ if (k == 1) {
+ ctx->flags |= SEEN_A;
+ emit_jit_reg_move(r_A, r_zero, ctx);
+ } else {
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ emit_mod(r_A, r_s0, ctx);
+ }
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ /* A /= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_div(r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ /* A %= X */
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* Check if r_X is zero */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_X, r_zero, b_off, ctx);
+ emit_load_imm(r_ret, 0, ctx); /* delay slot */
+ emit_mod(r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ /* A |= K */
+ ctx->flags |= SEEN_A;
+ emit_ori(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ /* A |= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+			emit_or(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ /* A ^= k */
+ ctx->flags |= SEEN_A;
+ emit_xori(r_A, r_A, k, ctx);
+ break;
+ case BPF_ANC | SKF_AD_ALU_XOR_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+ /* A ^= X */
+			ctx->flags |= SEEN_A | SEEN_X;
+ emit_xor(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ /* A &= K */
+ ctx->flags |= SEEN_A;
+ emit_andi(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ /* A &= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_and(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ /* A <<= K */
+ ctx->flags |= SEEN_A;
+ emit_sll(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ /* A <<= X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_sllv(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ /* A >>= K */
+ ctx->flags |= SEEN_A;
+ emit_srl(r_A, r_A, k, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_srlv(r_A, r_A, r_X, ctx);
+ break;
+ case BPF_ALU | BPF_NEG:
+ /* A = -A */
+ ctx->flags |= SEEN_A;
+ emit_neg(r_A, ctx);
+ break;
+ case BPF_JMP | BPF_JA:
+ /* pc += K */
+ b_off = b_imm(i + k + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ /* pc += ( A == K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_EQ | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A == X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_EQ | MIPS_COND_X;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGE | BPF_K:
+ /* pc += ( A >= K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GE | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGE | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A >= X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GE | MIPS_COND_X;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGT | BPF_K:
+ /* pc += ( A > K ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GT | MIPS_COND_K;
+ goto jmp_cmp;
+ case BPF_JMP | BPF_JGT | BPF_X:
+ ctx->flags |= SEEN_X;
+ /* pc += ( A > X ) ? pc->jt : pc->jf */
+ condt = MIPS_COND_GT | MIPS_COND_X;
+jmp_cmp:
+ /* Greater or Equal */
+ if ((condt & MIPS_COND_GE) ||
+ (condt & MIPS_COND_GT)) {
+ if (condt & MIPS_COND_K) { /* K */
+ ctx->flags |= SEEN_A;
+ emit_sltiu(r_s0, r_A, k, ctx);
+ } else { /* X */
+ ctx->flags |= SEEN_A |
+ SEEN_X;
+ emit_sltu(r_s0, r_A, r_X, ctx);
+ }
+				/* A < (K|X) ? r_scratch = 1 */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
+ ctx);
+ emit_nop(ctx);
+ /* A > (K|X) ? scratch = 0 */
+ if (condt & MIPS_COND_GT) {
+ /* Checking for equality */
+ ctx->flags |= SEEN_A | SEEN_X;
+ if (condt & MIPS_COND_K)
+ emit_load_imm(r_s0, k, ctx);
+ else
+ emit_jit_reg_move(r_s0, r_X,
+ ctx);
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* Finally, A > K|X */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ } else {
+ /* A >= (K|X) so jump */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ }
+ } else {
+ /* A == K|X */
+ if (condt & MIPS_COND_K) { /* K */
+ ctx->flags |= SEEN_A;
+ emit_load_imm(r_s0, k, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1,
+ ctx);
+ emit_bcond(MIPS_COND_NE, r_A, r_s0,
+ b_off, ctx);
+ emit_nop(ctx);
+ } else { /* X */
+ /* jump true */
+ ctx->flags |= SEEN_A | SEEN_X;
+ b_off = b_imm(i + inst->jt + 1,
+ ctx);
+ emit_bcond(MIPS_COND_EQ, r_A, r_X,
+ b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_A, r_X,
+ b_off, ctx);
+ emit_nop(ctx);
+ }
+ }
+ break;
+ case BPF_JMP | BPF_JSET | BPF_K:
+ ctx->flags |= SEEN_A;
+ /* pc += (A & K) ? pc -> jt : pc -> jf */
+ emit_load_imm(r_s1, k, ctx);
+ emit_and(r_s0, r_A, r_s1, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_JMP | BPF_JSET | BPF_X:
+ ctx->flags |= SEEN_X | SEEN_A;
+ /* pc += (A & X) ? pc -> jt : pc -> jf */
+ emit_and(r_s0, r_A, r_X, ctx);
+ /* jump true */
+ b_off = b_imm(i + inst->jt + 1, ctx);
+ emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
+ emit_nop(ctx);
+ /* jump false */
+ b_off = b_imm(i + inst->jf + 1, ctx);
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ break;
+ case BPF_RET | BPF_A:
+ ctx->flags |= SEEN_A;
+ if (i != prog->len - 1) {
+ /*
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ }
+ emit_reg_move(r_ret, r_A, ctx); /* delay slot */
+ break;
+ case BPF_RET | BPF_K:
+ /*
+ * It can emit two instructions so it does not fit on
+ * the delay slot.
+ */
+ emit_load_imm(r_ret, k, ctx);
+ if (i != prog->len - 1) {
+ /*
+ * If this is not the last instruction
+ * then jump to the epilogue
+ */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_b(b_off, ctx);
+ emit_nop(ctx);
+ }
+ break;
+ case BPF_MISC | BPF_TAX:
+ /* X = A */
+ ctx->flags |= SEEN_X | SEEN_A;
+ emit_jit_reg_move(r_X, r_A, ctx);
+ break;
+ case BPF_MISC | BPF_TXA:
+ /* A = X */
+ ctx->flags |= SEEN_A | SEEN_X;
+ emit_jit_reg_move(r_A, r_X, ctx);
+ break;
+ /* AUX */
+ case BPF_ANC | SKF_AD_PROTOCOL:
+			/* A = ntohs(skb->protocol) */
+ ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ protocol) != 2);
+ off = offsetof(struct sk_buff, protocol);
+ emit_half_load(r_A, r_skb, off, ctx);
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ /* This needs little endian fixup */
+ if (cpu_has_wsbh) {
+ /* R2 and later have the wsbh instruction */
+ emit_wsbh(r_A, r_A, ctx);
+ } else {
+ /* Get first byte */
+ emit_andi(r_tmp_imm, r_A, 0xff, ctx);
+ /* Shift it */
+ emit_sll(r_tmp, r_tmp_imm, 8, ctx);
+ /* Get second byte */
+ emit_srl(r_tmp_imm, r_A, 8, ctx);
+ emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
+				/* Put everything together in r_A */
+ emit_or(r_A, r_tmp, r_tmp_imm, ctx);
+ }
+#endif
+ break;
+ case BPF_ANC | SKF_AD_CPU:
+ ctx->flags |= SEEN_A | SEEN_OFF;
+ /* A = current_thread_info()->cpu */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
+ cpu) != 4);
+ off = offsetof(struct thread_info, cpu);
+ /* $28/gp points to the thread_info struct */
+ emit_load(r_A, 28, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_IFINDEX:
+ /* A = skb->dev->ifindex */
+ case BPF_ANC | SKF_AD_HATYPE:
+ /* A = skb->dev->type */
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ off = offsetof(struct sk_buff, dev);
+ /* Load *dev pointer */
+ emit_load_ptr(r_s0, r_skb, off, ctx);
+ /* error (0) in the delay slot */
+ b_off = b_imm(prog->len, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_bcond(MIPS_COND_EQ, r_s0, r_zero, b_off, ctx);
+ emit_reg_move(r_ret, r_zero, ctx);
+ if (code == (BPF_ANC | SKF_AD_IFINDEX)) {
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
+ off = offsetof(struct net_device, ifindex);
+ emit_load(r_A, r_s0, off, ctx);
+ } else { /* (code == (BPF_ANC | SKF_AD_HATYPE) */
+ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2);
+ off = offsetof(struct net_device, type);
+ emit_half_load_unsigned(r_A, r_s0, off, ctx);
+ }
+ break;
+ case BPF_ANC | SKF_AD_MARK:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
+ off = offsetof(struct sk_buff, mark);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_RXHASH:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
+ off = offsetof(struct sk_buff, hash);
+ emit_load(r_A, r_skb, off, ctx);
+ break;
+ case BPF_ANC | SKF_AD_VLAN_TAG:
+ case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ vlan_tci) != 2);
+ off = offsetof(struct sk_buff, vlan_tci);
+ emit_half_load_unsigned(r_s0, r_skb, off, ctx);
+ if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+ emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
+ } else {
+ emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+ /* return 1 if present */
+ emit_sltu(r_A, r_zero, r_A, ctx);
+ }
+ break;
+ case BPF_ANC | SKF_AD_PKTTYPE:
+ ctx->flags |= SEEN_SKB;
+
+ emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
+ /* Keep only the last 3 bits */
+ emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+ /* Get the actual packet type to the lower 3 bits */
+ emit_srl(r_A, r_A, 5, ctx);
+#endif
+ break;
+ case BPF_ANC | SKF_AD_QUEUE:
+ ctx->flags |= SEEN_SKB | SEEN_A;
+ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
+ queue_mapping) != 2);
+ BUILD_BUG_ON(offsetof(struct sk_buff,
+ queue_mapping) > 0xff);
+ off = offsetof(struct sk_buff, queue_mapping);
+ emit_half_load_unsigned(r_A, r_skb, off, ctx);
+ break;
+ default:
+ pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+ inst->code);
+ return -1;
+ }
+ }
+
+ /* compute offsets only during the first pass */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+ return 0;
+}
+
+void bpf_jit_compile(struct bpf_prog *fp)
+{
+ struct jit_ctx ctx;
+ unsigned int alloc_size, tmp_idx;
+
+ if (!bpf_jit_enable)
+ return;
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ return;
+
+ ctx.skf = fp;
+
+ if (build_body(&ctx))
+ goto out;
+
+ tmp_idx = ctx.idx;
+ build_prologue(&ctx);
+ ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
+ /* just to complete the ctx.idx count */
+ build_epilogue(&ctx);
+
+ alloc_size = 4 * ctx.idx;
+ ctx.target = module_alloc(alloc_size);
+ if (ctx.target == NULL)
+ goto out;
+
+ /* Clean it */
+ memset(ctx.target, 0, alloc_size);
+
+ ctx.idx = 0;
+
+ /* Generate the actual JIT code */
+ build_prologue(&ctx);
+ if (build_body(&ctx)) {
+ module_memfree(ctx.target);
+ goto out;
+ }
+ build_epilogue(&ctx);
+
+ /* Update the icache */
+ flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
+
+ fp->bpf_func = (void *)ctx.target;
+ fp->jited = 1;
+
+out:
+ kfree(ctx.offsets);
+}
+
+void bpf_jit_free(struct bpf_prog *fp)
+{
+ if (fp->jited)
+ module_memfree(fp->bpf_func);
+
+ bpf_prog_unlock_free(fp);
+}
diff --git a/arch/mips/net/bpf_jit.h b/arch/mips/net/bpf_jit.h
new file mode 100644
index 000000000..8f9f54841
--- /dev/null
+++ b/arch/mips/net/bpf_jit.h
@@ -0,0 +1,84 @@
+/*
+ * Just-In-Time compiler for BPF filters on MIPS
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#ifndef BPF_JIT_MIPS_OP_H
+#define BPF_JIT_MIPS_OP_H
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_V0 2
+#define MIPS_R_A0 4
+#define MIPS_R_A1 5
+#define MIPS_R_T4 12
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16
+#define MIPS_R_S1 17
+#define MIPS_R_S2 18
+#define MIPS_R_S3 19
+#define MIPS_R_S4 20
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* Conditional codes */
+#define MIPS_COND_EQ 0x1
+#define MIPS_COND_GE (0x1 << 1)
+#define MIPS_COND_GT (0x1 << 2)
+#define MIPS_COND_NE (0x1 << 3)
+#define MIPS_COND_ALL (0x1 << 4)
+/* Conditionals on X register or K immediate */
+#define MIPS_COND_X (0x1 << 5)
+#define MIPS_COND_K (0x1 << 6)
+
+#define r_ret MIPS_R_V0
+
+/*
+ * Use 2 scratch registers to avoid pipeline interlocks.
+ * There is no overhead during epilogue and prologue since
+ * any of the $s0-$s7 registers will only be preserved if
+ * they are going to actually be used.
+ */
+#define r_skb_hl MIPS_R_S0 /* skb header length */
+#define r_skb_data MIPS_R_S1 /* skb actual data */
+#define r_off MIPS_R_S2
+#define r_A MIPS_R_S3
+#define r_X MIPS_R_S4
+#define r_skb MIPS_R_S5
+#define r_M MIPS_R_S6
+#define r_skb_len MIPS_R_S7
+#define r_s0 MIPS_R_T4 /* scratch reg 1 */
+#define r_s1 MIPS_R_T5 /* scratch reg 2 */
+#define r_tmp_imm MIPS_R_T6 /* No need to preserve this */
+#define r_tmp MIPS_R_T7 /* No need to preserve this */
+#define r_zero MIPS_R_ZERO
+#define r_sp MIPS_R_SP
+#define r_ra MIPS_R_RA
+
+#ifndef __ASSEMBLY__
+
+/* Declare ASM helpers */
+
+#define DECLARE_LOAD_FUNC(func) \
+ extern u8 func(unsigned long *skb, int offset); \
+ extern u8 func##_negative(unsigned long *skb, int offset); \
+ extern u8 func##_positive(unsigned long *skb, int offset)
+
+DECLARE_LOAD_FUNC(sk_load_word);
+DECLARE_LOAD_FUNC(sk_load_half);
+DECLARE_LOAD_FUNC(sk_load_byte);
+
+#endif
+
+#endif /* BPF_JIT_MIPS_OP_H */
diff --git a/arch/mips/net/bpf_jit_asm.S b/arch/mips/net/bpf_jit_asm.S
new file mode 100644
index 000000000..57154c588
--- /dev/null
+++ b/arch/mips/net/bpf_jit_asm.S
@@ -0,0 +1,285 @@
+/*
+ * bpf_jit_asm.S: Packet/header access helper functions for MIPS/MIPS64 BPF
+ * compiler.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <asm/asm.h>
+#include <asm/isa-rev.h>
+#include <asm/regdef.h>
+#include "bpf_jit.h"
+
+/* ABI
+ *
+ * r_skb_hl skb header length
+ * r_skb_data skb data
+ * r_off(a1) offset register
+ * r_A BPF register A
+ * r_X	BPF register X
+ * r_skb(a0) *skb
+ * r_M *scratch memory
+ * r_skb_len	skb length
+ * r_s0 Scratch register 0
+ * r_s1 Scratch register 1
+ *
+ * On entry:
+ * a0: *skb
+ * a1: offset (imm or imm + X)
+ *
+ * All non-BPF-ABI registers are free for use. On return, we only
+ * care about r_ret. The BPF-ABI registers are assumed to remain
+ * unmodified during the entire filter operation.
+ */
+
+#define skb a0
+#define offset a1
+#define SKF_LL_OFF (-0x200000) /* Can't include linux/filter.h in assembly */
+
+ /* We know better :) so prevent assembler reordering etc */
+ .set noreorder
+
+#define is_offset_negative(TYPE) \
+ /* If offset is negative we have more work to do */ \
+ slti t0, offset, 0; \
+ bgtz t0, bpf_slow_path_##TYPE##_neg; \
+ /* Be careful what follows in DS. */
+
+#define is_offset_in_header(SIZE, TYPE) \
+ /* Reading from header? */ \
+ addiu $r_s0, $r_skb_hl, -SIZE; \
+ slt t0, $r_s0, offset; \
+ bgtz t0, bpf_slow_path_##TYPE; \
+
+LEAF(sk_load_word)
+ is_offset_negative(word)
+FEXPORT(sk_load_word_positive)
+ is_offset_in_header(4, word)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ .set reorder
+ lw $r_A, 0(t1)
+ .set noreorder
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh t0, $r_A
+ rotr $r_A, t0, 16
+# else
+ sll t0, $r_A, 24
+ srl t1, $r_A, 24
+ srl t2, $r_A, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_A, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ or $r_A, t0, t1
+# endif
+#endif
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_word)
+
+LEAF(sk_load_half)
+ is_offset_negative(half)
+FEXPORT(sk_load_half_positive)
+ is_offset_in_header(2, half)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ lhu $r_A, 0(t1)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh $r_A, $r_A
+# else
+ sll t0, $r_A, 8
+ srl t1, $r_A, 8
+ andi t0, t0, 0xff00
+ or $r_A, t0, t1
+# endif
+#endif
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_half)
+
+LEAF(sk_load_byte)
+ is_offset_negative(byte)
+FEXPORT(sk_load_byte_positive)
+ is_offset_in_header(1, byte)
+ /* Offset within header boundaries */
+ PTR_ADDU t1, $r_skb_data, offset
+ lbu $r_A, 0(t1)
+ jr $r_ra
+ move $r_ret, zero
+ END(sk_load_byte)
+
+/*
+ * call skb_copy_bits:
+ * (prototype in linux/skbuff.h)
+ *
+ * int skb_copy_bits(sk_buff *skb, int offset, void *to, int len)
+ *
+ * o32 mandates we leave 4 spaces for argument registers in case
+ * the callee needs to use them. Even though we don't care about
+ * the argument registers ourselves, we need to allocate that space
+ * to remain ABI compliant since the callee may want to use that space.
+ * We also allocate 2 more spaces for $r_ra and our return register (*to).
+ *
+ * n64 is a bit different. The *caller* will allocate the space to preserve
+ * the arguments. So in 64-bit kernels, we allocate the 4-arg space for no
+ * good reason but it does not matter that much really.
+ *
+ * (void *to) is returned in r_s0
+ *
+ */
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+#define DS_OFFSET(SIZE) (4 * SZREG)
+#else
+#define DS_OFFSET(SIZE) ((4 * SZREG) + (4 - SIZE))
+#endif
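+
+/*
+ * skb_copy_bits() writes SIZE bytes into the slot at 4 * SZREG, but
+ * INT_L below reloads a full word: on big-endian the destination
+ * pointer is therefore nudged by (4 - SIZE) bytes so the copied bytes
+ * land in the low end of the reloaded word.
+ */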
+#define bpf_slow_path_common(SIZE) \
+ /* Quick check. Are we within reasonable boundaries? */ \
+ LONG_ADDIU $r_s1, $r_skb_len, -SIZE; \
+ sltu $r_s0, offset, $r_s1; \
+ beqz $r_s0, fault; \
+ /* Load 4th argument in DS */ \
+ LONG_ADDIU a3, zero, SIZE; \
+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
+ PTR_LA t0, skb_copy_bits; \
+ PTR_S $r_ra, (5 * SZREG)($r_sp); \
+ /* Assign low slot to a2 */ \
+ PTR_ADDIU a2, $r_sp, DS_OFFSET(SIZE); \
+ jalr t0; \
+ /* Reset our destination slot (DS but it's ok) */ \
+ INT_S zero, (4 * SZREG)($r_sp); \
+ /* \
+ * skb_copy_bits returns 0 on success and -EFAULT \
+ * on error. Our data live in a2. Do not bother with \
+ * our data if an error has been returned. \
+ */ \
+ /* Restore our frame */ \
+ PTR_L $r_ra, (5 * SZREG)($r_sp); \
+ INT_L $r_s0, (4 * SZREG)($r_sp); \
+ bltz v0, fault; \
+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
+ move $r_ret, zero; \
+
+NESTED(bpf_slow_path_word, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ wsbh t0, $r_s0
+ jr $r_ra
+ rotr $r_A, t0, 16
+# else
+ sll t0, $r_s0, 24
+ srl t1, $r_s0, 24
+ srl t2, $r_s0, 8
+ or t0, t0, t1
+ andi t2, t2, 0xff00
+ andi t1, $r_s0, 0xff00
+ or t0, t0, t2
+ sll t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
+ jr $r_ra
+ move $r_A, $r_s0
+#endif
+
+ END(bpf_slow_path_word)
+
+NESTED(bpf_slow_path_half, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(2)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+# if MIPS_ISA_REV >= 2
+ jr $r_ra
+ wsbh $r_A, $r_s0
+# else
+ sll t0, $r_s0, 8
+ andi t1, $r_s0, 0xff00
+ andi t0, t0, 0xff00
+ srl t1, t1, 8
+ jr $r_ra
+ or $r_A, t0, t1
+# endif
+#else
+ jr $r_ra
+ move $r_A, $r_s0
+#endif
+
+ END(bpf_slow_path_half)
+
+NESTED(bpf_slow_path_byte, (6 * SZREG), $r_sp)
+ bpf_slow_path_common(1)
+ jr $r_ra
+ move $r_A, $r_s0
+
+ END(bpf_slow_path_byte)
+
+/*
+ * Negative entry points
+ */
+ .macro bpf_is_end_of_data
+ li t0, SKF_LL_OFF
+ /* Reading link layer data? */
+ slt t1, offset, t0
+ bgtz t1, fault
+ /* Be careful what follows in DS. */
+ .endm
+/*
+ * call bpf_internal_load_pointer_neg_helper:
+ * (prototype in linux/filter.h)
+ *
+ * void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+ * int k, unsigned int size)
+ *
+ * see above (bpf_slow_path_common) for ABI restrictions
+ */
+#define bpf_negative_common(SIZE) \
+ PTR_ADDIU $r_sp, $r_sp, -(6 * SZREG); \
+ PTR_LA t0, bpf_internal_load_pointer_neg_helper; \
+ PTR_S $r_ra, (5 * SZREG)($r_sp); \
+ jalr t0; \
+ li a2, SIZE; \
+ PTR_L $r_ra, (5 * SZREG)($r_sp); \
+ /* Check return pointer */ \
+ beqz v0, fault; \
+ PTR_ADDIU $r_sp, $r_sp, 6 * SZREG; \
+ /* Preserve our pointer */ \
+ move $r_s0, v0; \
+ /* Set return value */ \
+ move $r_ret, zero; \
+
+bpf_slow_path_word_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_word_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(4)
+ jr $r_ra
+ lw $r_A, 0($r_s0)
+ END(sk_load_word_negative)
+
+bpf_slow_path_half_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_half_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(2)
+ jr $r_ra
+ lhu $r_A, 0($r_s0)
+ END(sk_load_half_negative)
+
+bpf_slow_path_byte_neg:
+ bpf_is_end_of_data
+NESTED(sk_load_byte_negative, (6 * SZREG), $r_sp)
+ bpf_negative_common(1)
+ jr $r_ra
+ lbu $r_A, 0($r_s0)
+ END(sk_load_byte_negative)
+
+fault:
+ jr $r_ra
+ addiu $r_ret, zero, 1
diff --git a/arch/mips/net/ebpf_jit.c b/arch/mips/net/ebpf_jit.c
new file mode 100644
index 000000000..947a7172c
--- /dev/null
+++ b/arch/mips/net/ebpf_jit.c
@@ -0,0 +1,1849 @@
+/*
+ * Just-In-Time compiler for eBPF filters on MIPS
+ *
+ * Copyright (c) 2017 Cavium, Inc.
+ *
+ * Based on code from:
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2 of the License.
+ */
+
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/slab.h>
+#include <asm/bitops.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/uasm.h>
+
+/* Registers used by JIT */
+#define MIPS_R_ZERO 0
+#define MIPS_R_AT 1
+#define MIPS_R_V0 2 /* BPF_R0 */
+#define MIPS_R_V1 3
+#define MIPS_R_A0 4 /* BPF_R1 */
+#define MIPS_R_A1 5 /* BPF_R2 */
+#define MIPS_R_A2 6 /* BPF_R3 */
+#define MIPS_R_A3 7 /* BPF_R4 */
+#define MIPS_R_A4 8 /* BPF_R5 */
+#define MIPS_R_T4 12 /* BPF_AX */
+#define MIPS_R_T5 13
+#define MIPS_R_T6 14
+#define MIPS_R_T7 15
+#define MIPS_R_S0 16 /* BPF_R6 */
+#define MIPS_R_S1 17 /* BPF_R7 */
+#define MIPS_R_S2 18 /* BPF_R8 */
+#define MIPS_R_S3 19 /* BPF_R9 */
+#define MIPS_R_S4 20 /* BPF_TCC */
+#define MIPS_R_S5 21
+#define MIPS_R_S6 22
+#define MIPS_R_S7 23
+#define MIPS_R_T8 24
+#define MIPS_R_T9 25
+#define MIPS_R_SP 29
+#define MIPS_R_RA 31
+
+/* eBPF flags */
+#define EBPF_SAVE_S0 BIT(0)
+#define EBPF_SAVE_S1 BIT(1)
+#define EBPF_SAVE_S2 BIT(2)
+#define EBPF_SAVE_S3 BIT(3)
+#define EBPF_SAVE_S4 BIT(4)
+#define EBPF_SAVE_RA BIT(5)
+#define EBPF_SEEN_FP BIT(6)
+#define EBPF_SEEN_TC BIT(7)
+#define EBPF_TCC_IN_V1 BIT(8)
+
+/*
+ * For the mips64 ISA, we need to track the value range or type for
+ * each JIT register. The BPF machine requires zero extended 32-bit
+ * values, but the mips64 ISA requires sign extended 32-bit values.
+ * At each point in the BPF program we track the state of every
+ * register so that we can zero extend or sign extend as the BPF
+ * semantics require.
+ */
+enum reg_val_type {
+ /* uninitialized */
+ REG_UNKNOWN,
+ /* not known to be 32-bit compatible. */
+ REG_64BIT,
+ /* 32-bit compatible, no truncation needed for 64-bit ops. */
+ REG_64BIT_32BIT,
+ /* 32-bit compatible, need truncation for 64-bit ops. */
+ REG_32BIT,
+ /* 32-bit zero extended. */
+ REG_32BIT_ZERO_EX,
+ /* 32-bit no sign/zero extension needed. */
+ REG_32BIT_POS
+};
+
+/*
+ * high bit of offsets indicates if long branch conversion done at
+ * this insn.
+ */
+#define OFFSETS_B_CONV BIT(31)
+
+/**
+ * struct jit_ctx - JIT context
+ * @skf: The sk_filter
+ * @stack_size: eBPF stack size
+ * @idx: Instruction index
+ * @flags: JIT flags
+ * @offsets: Instruction offsets
+ * @target: Memory location for the compiled filter
+ * @reg_val_types:	Packed enum reg_val_type for each register.
+ */
+struct jit_ctx {
+ const struct bpf_prog *skf;
+ int stack_size;
+ u32 idx;
+ u32 flags;
+ u32 *offsets;
+ u32 *target;
+ u64 *reg_val_types;
+ unsigned int long_b_conversion:1;
+ unsigned int gen_b_offsets:1;
+ unsigned int use_bbit_insns:1;
+};
+
+static void set_reg_val_type(u64 *rvt, int reg, enum reg_val_type type)
+{
+ *rvt &= ~(7ull << (reg * 3));
+ *rvt |= ((u64)type << (reg * 3));
+}
+
+static enum reg_val_type get_reg_val_type(const struct jit_ctx *ctx,
+ int index, int reg)
+{
+ return (ctx->reg_val_types[index] >> (reg * 3)) & 7;
+}
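+
+/*
+ * e.g. for BPF_REG_2 the 3-bit type lives in bits 8..6 of the packed
+ * u64, so set_reg_val_type(&rvt, BPF_REG_2, REG_32BIT) and a later
+ * get_reg_val_type(ctx, idx, BPF_REG_2) round-trip that field.
+ */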
+
+/* Simply emit the instruction if the JIT memory space has been allocated */
+#define emit_instr(ctx, func, ...) \
+do { \
+ if ((ctx)->target != NULL) { \
+ u32 *p = &(ctx)->target[ctx->idx]; \
+ uasm_i_##func(&p, ##__VA_ARGS__); \
+ } \
+ (ctx)->idx++; \
+} while (0)
+
+static unsigned int j_target(struct jit_ctx *ctx, int target_idx)
+{
+ unsigned long target_va, base_va;
+ unsigned int r;
+
+ if (!ctx->target)
+ return 0;
+
+ base_va = (unsigned long)ctx->target;
+ target_va = base_va + (ctx->offsets[target_idx] & ~OFFSETS_B_CONV);
+
+ if ((base_va & ~0x0ffffffful) != (target_va & ~0x0ffffffful))
+ return (unsigned int)-1;
+ r = target_va & 0x0ffffffful;
+ return r;
+}
+
+/* Compute the immediate value for PC-relative branches. */
+static u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
+{
+ if (!ctx->gen_b_offsets)
+ return 0;
+
+ /*
+ * We want a pc-relative branch. tgt is the instruction offset
+	 * we want to jump to.
+	 *
+	 * Branch on MIPS:
+ * I: target_offset <- sign_extend(offset)
+ * I+1: PC += target_offset (delay slot)
+ *
+ * ctx->idx currently points to the branch instruction
+ * but the offset is added to the delay slot so we need
+ * to subtract 4.
+ */
+ return (ctx->offsets[tgt] & ~OFFSETS_B_CONV) -
+ (ctx->idx * 4) - 4;
+}
+
+enum which_ebpf_reg {
+ src_reg,
+ src_reg_no_fp,
+ dst_reg,
+ dst_reg_fp_ok
+};
+
+/*
+ * For eBPF, the register mapping naturally falls out of the
+ * requirements of eBPF and the MIPS n64 ABI. We don't maintain a
+ * separate frame pointer, so BPF_REG_10 relative accesses are
+ * adjusted to be $sp relative.
+ */
+int ebpf_to_mips_reg(struct jit_ctx *ctx, const struct bpf_insn *insn,
+ enum which_ebpf_reg w)
+{
+ int ebpf_reg = (w == src_reg || w == src_reg_no_fp) ?
+ insn->src_reg : insn->dst_reg;
+
+ switch (ebpf_reg) {
+ case BPF_REG_0:
+ return MIPS_R_V0;
+ case BPF_REG_1:
+ return MIPS_R_A0;
+ case BPF_REG_2:
+ return MIPS_R_A1;
+ case BPF_REG_3:
+ return MIPS_R_A2;
+ case BPF_REG_4:
+ return MIPS_R_A3;
+ case BPF_REG_5:
+ return MIPS_R_A4;
+ case BPF_REG_6:
+ ctx->flags |= EBPF_SAVE_S0;
+ return MIPS_R_S0;
+ case BPF_REG_7:
+ ctx->flags |= EBPF_SAVE_S1;
+ return MIPS_R_S1;
+ case BPF_REG_8:
+ ctx->flags |= EBPF_SAVE_S2;
+ return MIPS_R_S2;
+ case BPF_REG_9:
+ ctx->flags |= EBPF_SAVE_S3;
+ return MIPS_R_S3;
+ case BPF_REG_10:
+ if (w == dst_reg || w == src_reg_no_fp)
+ goto bad_reg;
+ ctx->flags |= EBPF_SEEN_FP;
+ /*
+ * Needs special handling, return something that
+ * cannot be clobbered just in case.
+ */
+ return MIPS_R_ZERO;
+ case BPF_REG_AX:
+ return MIPS_R_T4;
+ default:
+bad_reg:
+ WARN(1, "Illegal bpf reg: %d\n", ebpf_reg);
+ return -EINVAL;
+ }
+}
+/*
+ * eBPF stack frame will be something like:
+ *
+ * Entry $sp ------> +--------------------------------+
+ * | $ra (optional) |
+ * +--------------------------------+
+ * | $s0 (optional) |
+ * +--------------------------------+
+ * | $s1 (optional) |
+ * +--------------------------------+
+ * | $s2 (optional) |
+ * +--------------------------------+
+ * | $s3 (optional) |
+ * +--------------------------------+
+ * | $s4 (optional) |
+ * +--------------------------------+
+ * | tmp-storage (if $ra saved) |
+ * $sp + tmp_offset --> +--------------------------------+ <--BPF_REG_10
+ * | BPF_REG_10 relative storage |
+ * | MAX_BPF_STACK (optional) |
+ * | . |
+ * | . |
+ * | . |
+ * $sp --------> +--------------------------------+
+ *
+ * If BPF_REG_10 is never referenced, then the MAX_BPF_STACK sized
+ * area is not allocated.
+ */
+static int gen_int_prologue(struct jit_ctx *ctx)
+{
+ int stack_adjust = 0;
+ int store_offset;
+ int locals_size;
+
+ if (ctx->flags & EBPF_SAVE_RA)
+ /*
+		 * If $ra is being saved, we are doing a function call
+		 * and may need an extra 8-byte tmp area.
+ */
+ stack_adjust += 16;
+ if (ctx->flags & EBPF_SAVE_S0)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S1)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S2)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S3)
+ stack_adjust += 8;
+ if (ctx->flags & EBPF_SAVE_S4)
+ stack_adjust += 8;
+
+ BUILD_BUG_ON(MAX_BPF_STACK & 7);
+ locals_size = (ctx->flags & EBPF_SEEN_FP) ? MAX_BPF_STACK : 0;
+
+ stack_adjust += locals_size;
+
+ ctx->stack_size = stack_adjust;
+
+ /*
+ * First instruction initializes the tail call count (TCC).
+ * On tail call we skip this instruction, and the TCC is
+ * passed in $v1 from the caller.
+ */
+ emit_instr(ctx, daddiu, MIPS_R_V1, MIPS_R_ZERO, MAX_TAIL_CALL_CNT);
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, -stack_adjust);
+ else
+ return 0;
+
+ store_offset = stack_adjust - 8;
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, sd, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, sd, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, sd, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, sd, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, sd, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, sd, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+
+ if ((ctx->flags & EBPF_SEEN_TC) && !(ctx->flags & EBPF_TCC_IN_V1))
+ emit_instr(ctx, daddu, MIPS_R_S4, MIPS_R_V1, MIPS_R_ZERO);
+
+ return 0;
+}
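+
+/*
+ * e.g. a program that calls a helper and uses BPF_REG_6 and BPF_REG_10
+ * sets EBPF_SAVE_RA | EBPF_SAVE_S0 | EBPF_SEEN_FP, so stack_adjust is
+ * 16 + 8 + MAX_BPF_STACK (512) == 536 bytes.
+ */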
+
+static int build_int_epilogue(struct jit_ctx *ctx, int dest_reg)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ int stack_adjust = ctx->stack_size;
+ int store_offset = stack_adjust - 8;
+ enum reg_val_type td;
+ int r0 = MIPS_R_V0;
+
+ if (dest_reg == MIPS_R_RA) {
+ /* Don't let zero extended value escape. */
+ td = get_reg_val_type(ctx, prog->len, BPF_REG_0);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ emit_instr(ctx, sll, r0, r0, 0);
+ }
+
+ if (ctx->flags & EBPF_SAVE_RA) {
+ emit_instr(ctx, ld, MIPS_R_RA, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S0) {
+ emit_instr(ctx, ld, MIPS_R_S0, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S1) {
+ emit_instr(ctx, ld, MIPS_R_S1, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S2) {
+ emit_instr(ctx, ld, MIPS_R_S2, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S3) {
+ emit_instr(ctx, ld, MIPS_R_S3, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ if (ctx->flags & EBPF_SAVE_S4) {
+ emit_instr(ctx, ld, MIPS_R_S4, store_offset, MIPS_R_SP);
+ store_offset -= 8;
+ }
+ emit_instr(ctx, jr, dest_reg);
+
+ if (stack_adjust)
+ emit_instr(ctx, daddiu, MIPS_R_SP, MIPS_R_SP, stack_adjust);
+ else
+ emit_instr(ctx, nop);
+
+ return 0;
+}
+
+static void gen_imm_to_reg(const struct bpf_insn *insn, int reg,
+ struct jit_ctx *ctx)
+{
+ if (insn->imm >= S16_MIN && insn->imm <= S16_MAX) {
+ emit_instr(ctx, addiu, reg, MIPS_R_ZERO, insn->imm);
+ } else {
+ int lower = (s16)(insn->imm & 0xffff);
+ int upper = insn->imm - lower;
+
+ emit_instr(ctx, lui, reg, upper >> 16);
+ emit_instr(ctx, addiu, reg, reg, lower);
+ }
+}
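+
+/*
+ * e.g. insn->imm == 0x1234ffff: lower == (s16)0xffff == -1 and
+ * upper == 0x12350000, so the JIT emits "lui reg, 0x1235" then
+ * "addiu reg, reg, -1"; the borrow yields 0x1234ffff.
+ */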
+
+static int gen_imm_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int idx)
+{
+ int upper_bound, lower_bound;
+ int dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+
+ if (dst < 0)
+ return dst;
+
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ case BPF_ADD:
+ upper_bound = S16_MAX;
+ lower_bound = S16_MIN;
+ break;
+ case BPF_SUB:
+ upper_bound = -(int)S16_MIN;
+ lower_bound = -(int)S16_MAX;
+ break;
+ case BPF_AND:
+ case BPF_OR:
+ case BPF_XOR:
+ upper_bound = 0xffff;
+ lower_bound = 0;
+ break;
+ case BPF_RSH:
+ case BPF_LSH:
+ case BPF_ARSH:
+ /* Shift amounts are truncated, no need for bounds */
+ upper_bound = S32_MAX;
+ lower_bound = S32_MIN;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /*
+ * Immediate move clobbers the register, so no sign/zero
+ * extension needed.
+ */
+ if (BPF_CLASS(insn->code) == BPF_ALU64 &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ /* BPF_ALU | BPF_LSH doesn't need separate sign extension */
+ if (BPF_CLASS(insn->code) == BPF_ALU &&
+ BPF_OP(insn->code) != BPF_LSH &&
+ BPF_OP(insn->code) != BPF_MOV &&
+ get_reg_val_type(ctx, idx, insn->dst_reg) != REG_32BIT)
+ emit_instr(ctx, sll, dst, dst, 0);
+
+ if (insn->imm >= lower_bound && insn->imm <= upper_bound) {
+ /* single insn immediate case */
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_MOV:
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, andi, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, ori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xori, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, daddiu, dst, dst, -insn->imm);
+ break;
+ case BPF_ALU64 | BPF_RSH:
+ emit_instr(ctx, dsrl_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_RSH:
+ emit_instr(ctx, srl, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_LSH:
+ emit_instr(ctx, dsll_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_LSH:
+ emit_instr(ctx, sll, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU64 | BPF_ARSH:
+ emit_instr(ctx, dsra_safe, dst, dst, insn->imm & 0x3f);
+ break;
+ case BPF_ALU | BPF_ARSH:
+ emit_instr(ctx, sra, dst, dst, insn->imm & 0x1f);
+ break;
+ case BPF_ALU | BPF_MOV:
+ emit_instr(ctx, addiu, dst, MIPS_R_ZERO, insn->imm);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addiu, dst, dst, insn->imm);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, addiu, dst, dst, -insn->imm);
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ /* multi insn immediate case */
+ if (BPF_OP(insn->code) == BPF_MOV) {
+ gen_imm_to_reg(insn, dst, ctx);
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_OP(insn->code) | BPF_CLASS(insn->code)) {
+ case BPF_ALU64 | BPF_AND:
+ case BPF_ALU | BPF_AND:
+ emit_instr(ctx, and, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_OR:
+ case BPF_ALU | BPF_OR:
+ emit_instr(ctx, or, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_XOR:
+ case BPF_ALU | BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU64 | BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, MIPS_R_AT);
+ break;
+ case BPF_ALU | BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, MIPS_R_AT);
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ }
+
+ return 0;
+}
+
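+/*
+ * Load a 64-bit constant: values that fit a sign-extended 16- or
+ * 32-bit immediate take one or two instructions; anything else is
+ * built 16 bits at a time with lui/ori/dsll, e.g. 0x123456789abc
+ * becomes "lui 0x1234; ori 0x5678; dsll 16; ori 0x9abc".
+ */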
+static void emit_const_to_reg(struct jit_ctx *ctx, int dst, u64 value)
+{
+ if (value >= 0xffffffffffff8000ull || value < 0x8000ull) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_ZERO, (int)value);
+ } else if (value >= 0xffffffff80000000ull ||
+ (value < 0x80000000 && value > 0xffff)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)(value >> 16));
+ emit_instr(ctx, ori, dst, dst, (unsigned int)(value & 0xffff));
+ } else {
+ int i;
+ bool seen_part = false;
+ int needed_shift = 0;
+
+ for (i = 0; i < 4; i++) {
+ u64 part = (value >> (16 * (3 - i))) & 0xffff;
+
+ if (seen_part && needed_shift > 0 && (part || i == 3)) {
+ emit_instr(ctx, dsll_safe, dst, dst, needed_shift);
+ needed_shift = 0;
+ }
+ if (part) {
+ if (i == 0 || (!seen_part && i < 3 && part < 0x8000)) {
+ emit_instr(ctx, lui, dst, (s32)(s16)part);
+ needed_shift = -16;
+ } else {
+ emit_instr(ctx, ori, dst,
+ seen_part ? dst : MIPS_R_ZERO,
+ (unsigned int)part);
+ }
+ seen_part = true;
+ }
+ if (seen_part)
+ needed_shift += 16;
+ }
+ }
+}
+
+static int emit_bpf_tail_call(struct jit_ctx *ctx, int this_idx)
+{
+ int off, b_off;
+ int tcc_reg;
+
+ ctx->flags |= EBPF_SEEN_TC;
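+	/*
+	 * With the eBPF argument mapping used here, $a1 holds the
+	 * struct bpf_array pointer and $a2 the index.
+	 */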
+ /*
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ off = offsetof(struct bpf_array, map.max_entries);
+ emit_instr(ctx, lwu, MIPS_R_T5, off, MIPS_R_A1);
+ emit_instr(ctx, sltu, MIPS_R_AT, MIPS_R_T5, MIPS_R_A2);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bne, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /*
+ * if (TCC-- < 0)
+ * goto out;
+ */
+ /* Delay slot */
+ tcc_reg = (ctx->flags & EBPF_TCC_IN_V1) ? MIPS_R_V1 : MIPS_R_S4;
+ emit_instr(ctx, daddiu, MIPS_R_T5, tcc_reg, -1);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, bltz, tcc_reg, b_off);
+ /*
+ * prog = array->ptrs[index];
+ * if (prog == NULL)
+ * goto out;
+ */
+ /* Delay slot */
+ emit_instr(ctx, dsll, MIPS_R_T8, MIPS_R_A2, 3);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, MIPS_R_A1);
+ off = offsetof(struct bpf_array, ptrs);
+ emit_instr(ctx, ld, MIPS_R_AT, off, MIPS_R_T8);
+ b_off = b_imm(this_idx + 1, ctx);
+ emit_instr(ctx, beq, MIPS_R_AT, MIPS_R_ZERO, b_off);
+ /* Delay slot */
+ emit_instr(ctx, nop);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ emit_instr(ctx, ld, MIPS_R_T9, off, MIPS_R_AT);
+ /* All systems are go... propagate TCC */
+ emit_instr(ctx, daddu, MIPS_R_V1, MIPS_R_T5, MIPS_R_ZERO);
+ /* Skip first instruction (TCC initialization) */
+ emit_instr(ctx, daddiu, MIPS_R_T9, MIPS_R_T9, 4);
+ return build_int_epilogue(ctx, MIPS_R_T9);
+}
+
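+/*
+ * Conditional branches have an 18-bit signed byte offset (a 16-bit
+ * instruction field, shifted left by 2).
+ */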
+static bool is_bad_offset(int b_off)
+{
+ return b_off > 0x1ffff || b_off < -0x20000;
+}
+
+/* Returns the number of insn slots consumed. */
+static int build_one_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
+ int this_idx, int exit_idx)
+{
+ int src, dst, r, td, ts, mem_off, b_off;
+ bool need_swap, did_move, cmp_eq;
+ unsigned int target = 0;
+ u64 t64;
+ s64 t64s;
+ int bpf_op = BPF_OP(insn->code);
+
+ switch (insn->code) {
+ case BPF_ALU64 | BPF_ADD | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_SUB | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_OR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_AND | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_LSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_RSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_XOR | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_ARSH | BPF_K: /* ALU64_IMM */
+ case BPF_ALU64 | BPF_MOV | BPF_K: /* ALU64_IMM */
+ case BPF_ALU | BPF_MOV | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_ADD | BPF_K: /* ALU32_IMM */
+ case BPF_ALU | BPF_SUB | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_OR | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_AND | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_LSH | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_RSH | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_XOR | BPF_K: /* ALU32_IMM */
+	case BPF_ALU | BPF_ARSH | BPF_K: /* ALU32_IMM */
+ r = gen_imm_insn(insn, ctx, this_idx);
+ if (r < 0)
+ return r;
+ break;
+ case BPF_ALU64 | BPF_MUL | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, dmultu, MIPS_R_AT, dst);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU64 | BPF_NEG | BPF_K: /* ALU64_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ emit_instr(ctx, dsubu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ if (insn->imm == 1) /* Mult by 1 is a nop */
+ break;
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, multu, dst, MIPS_R_AT);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_ALU | BPF_NEG | BPF_K: /* ALU_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ emit_instr(ctx, subu, dst, MIPS_R_ZERO, dst);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K: /* ALU_IMM */
+ case BPF_ALU | BPF_MOD | BPF_K: /* ALU_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX)
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, divu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+	case BPF_ALU64 | BPF_DIV | BPF_K: /* ALU64_IMM */
+	case BPF_ALU64 | BPF_MOD | BPF_K: /* ALU64_IMM */
+ if (insn->imm == 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ if (insn->imm == 1) {
+ /* div by 1 is a nop, mod by 1 is zero */
+ if (bpf_op == BPF_MOD)
+ emit_instr(ctx, addu, dst, MIPS_R_ZERO, MIPS_R_ZERO);
+ break;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ emit_instr(ctx, ddivu, dst, MIPS_R_AT);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_ALU64 | BPF_MOV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ADD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_SUB | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_XOR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_OR | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_AND | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MUL | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_DIV | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_MOD | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_LSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_RSH | BPF_X: /* ALU64_REG */
+ case BPF_ALU64 | BPF_ARSH | BPF_X: /* ALU64_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ if (get_reg_val_type(ctx, this_idx, insn->dst_reg) == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+ did_move = false;
+ if (insn->src_reg == BPF_REG_10) {
+ if (bpf_op == BPF_MOV) {
+ emit_instr(ctx, daddiu, dst, MIPS_R_SP, MAX_BPF_STACK);
+ did_move = true;
+ } else {
+ emit_instr(ctx, daddiu, MIPS_R_AT, MIPS_R_SP, MAX_BPF_STACK);
+ src = MIPS_R_AT;
+ }
+ } else if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ emit_instr(ctx, daddu, tmp_reg, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, tmp_reg, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, daddu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, daddu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, dsubu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, dmultu, dst, src);
+ emit_instr(ctx, mflo, dst);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ emit_instr(ctx, ddivu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, dsllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, dsrlv, dst, dst, src);
+ break;
+ case BPF_ARSH:
+ emit_instr(ctx, dsrav, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU64_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_ALU | BPF_MOV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_ADD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_SUB | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_XOR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_OR | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_AND | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MUL | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_DIV | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_MOD | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_LSH | BPF_X: /* ALU_REG */
+ case BPF_ALU | BPF_RSH | BPF_X: /* ALU_REG */
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (td == REG_64BIT || td == REG_32BIT_ZERO_EX) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+ did_move = false;
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (ts == REG_64BIT || ts == REG_32BIT_ZERO_EX) {
+ int tmp_reg = MIPS_R_AT;
+
+ if (bpf_op == BPF_MOV) {
+ tmp_reg = dst;
+ did_move = true;
+ }
+ /* sign extend */
+ emit_instr(ctx, sll, tmp_reg, src, 0);
+ src = MIPS_R_AT;
+ }
+ switch (bpf_op) {
+ case BPF_MOV:
+ if (!did_move)
+ emit_instr(ctx, addu, dst, src, MIPS_R_ZERO);
+ break;
+ case BPF_ADD:
+ emit_instr(ctx, addu, dst, dst, src);
+ break;
+ case BPF_SUB:
+ emit_instr(ctx, subu, dst, dst, src);
+ break;
+ case BPF_XOR:
+ emit_instr(ctx, xor, dst, dst, src);
+ break;
+ case BPF_OR:
+ emit_instr(ctx, or, dst, dst, src);
+ break;
+ case BPF_AND:
+ emit_instr(ctx, and, dst, dst, src);
+ break;
+ case BPF_MUL:
+ emit_instr(ctx, mul, dst, dst, src);
+ break;
+ case BPF_DIV:
+ case BPF_MOD:
+ emit_instr(ctx, divu, dst, src);
+ if (bpf_op == BPF_DIV)
+ emit_instr(ctx, mflo, dst);
+ else
+ emit_instr(ctx, mfhi, dst);
+ break;
+ case BPF_LSH:
+ emit_instr(ctx, sllv, dst, dst, src);
+ break;
+ case BPF_RSH:
+ emit_instr(ctx, srlv, dst, dst, src);
+ break;
+ default:
+ pr_err("ALU_REG NOT HANDLED\n");
+ return -EINVAL;
+ }
+ break;
+ case BPF_JMP | BPF_EXIT:
+ if (this_idx + 1 < exit_idx) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, beq, MIPS_R_ZERO, MIPS_R_ZERO, b_off);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JEQ | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JNE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+ if (insn->imm == 0) {
+ src = MIPS_R_ZERO;
+ } else {
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ src = MIPS_R_AT;
+ }
+ goto jeq_common;
+ case BPF_JMP | BPF_JEQ | BPF_X: /* JMP_REG */
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (src < 0 || dst < 0)
+ return -EINVAL;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ ts = get_reg_val_type(ctx, this_idx, insn->src_reg);
+ if (td == REG_32BIT && ts != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ } else if (ts == REG_32BIT && td != REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, dst, 0);
+ dst = MIPS_R_AT;
+ }
+ if (bpf_op == BPF_JSET) {
+ emit_instr(ctx, and, MIPS_R_AT, dst, src);
+ cmp_eq = false;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE) {
+ emit_instr(ctx, dsubu, MIPS_R_AT, dst, src);
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ if (bpf_op == BPF_JSGT)
+ emit_instr(ctx, bgtz, MIPS_R_AT, b_off);
+ else
+ emit_instr(ctx, blez, MIPS_R_AT, b_off);
+ emit_instr(ctx, nop);
+ break;
+ } else if (bpf_op == BPF_JSGE || bpf_op == BPF_JSLT) {
+ emit_instr(ctx, slt, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JSGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGT || bpf_op == BPF_JLE) {
+ /* dst or src could be AT */
+ emit_instr(ctx, dsubu, MIPS_R_T8, dst, src);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ /* SP known to be non-zero, movz becomes boolean not */
+ emit_instr(ctx, movz, MIPS_R_T9, MIPS_R_SP, MIPS_R_T8);
+ emit_instr(ctx, movn, MIPS_R_T9, MIPS_R_ZERO, MIPS_R_T8);
+ emit_instr(ctx, or, MIPS_R_AT, MIPS_R_T9, MIPS_R_AT);
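+			/*
+			 * AT is now (dst < src) || (dst == src), i.e.
+			 * dst <= src unsigned; JGT branches when AT is
+			 * zero, JLE when it is non-zero.
+			 */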
+ cmp_eq = bpf_op == BPF_JGT;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else if (bpf_op == BPF_JGE || bpf_op == BPF_JLT) {
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, src);
+ cmp_eq = bpf_op == BPF_JGE;
+ dst = MIPS_R_AT;
+ src = MIPS_R_ZERO;
+ } else { /* JNE/JEQ case */
+ cmp_eq = (bpf_op == BPF_JEQ);
+ }
+jeq_common:
+		/*
+		 * If the next insn is EXIT and we are jumping around
+		 * only it, invert the sense of the compare and
+		 * conditionally jump to the exit. Poor man's branch
+		 * chaining.
+		 */
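+		/*
+		 * A target out of branch range is handled by inverting
+		 * the condition and branching 3 instructions ahead
+		 * (4 * 3 bytes), over an absolute jump to the real
+		 * target.
+		 */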
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, exit_idx);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, bne, dst, src, b_off);
+ else
+ emit_instr(ctx, beq, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ cmp_eq = !cmp_eq;
+ b_off = 4 * 3;
+ if (!(ctx->offsets[this_idx] & OFFSETS_B_CONV)) {
+ ctx->offsets[this_idx] |= OFFSETS_B_CONV;
+ ctx->long_b_conversion = 1;
+ }
+ }
+
+ if (cmp_eq)
+ emit_instr(ctx, beq, dst, src, b_off);
+ else
+ emit_instr(ctx, bne, dst, src, b_off);
+ emit_instr(ctx, nop);
+ if (ctx->offsets[this_idx] & OFFSETS_B_CONV) {
+ emit_instr(ctx, j, target);
+ emit_instr(ctx, nop);
+ }
+ break;
+ case BPF_JMP | BPF_JSGT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSGE | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLT | BPF_K: /* JMP_IMM */
+ case BPF_JMP | BPF_JSLE | BPF_K: /* JMP_IMM */
+ cmp_eq = (bpf_op == BPF_JSGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (insn->imm == 0) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ switch (bpf_op) {
+ case BPF_JSGT:
+ emit_instr(ctx, bgtz, dst, b_off);
+ break;
+ case BPF_JSGE:
+ emit_instr(ctx, bgez, dst, b_off);
+ break;
+ case BPF_JSLT:
+ emit_instr(ctx, bltz, dst, b_off);
+ break;
+ case BPF_JSLE:
+ emit_instr(ctx, blez, dst, b_off);
+ break;
+ }
+ emit_instr(ctx, nop);
+ break;
+ }
+	/*
+	 * Only a signed "set on less than" compare is available, so
+	 * use imm + 1: "GT imm" becomes "not LT imm + 1", and
+	 * "LE imm" becomes "LT imm + 1".
+	 */
+	if (bpf_op == BPF_JSGT || bpf_op == BPF_JSLE)
+		t64s = insn->imm + 1;
+	else
+		t64s = insn->imm;
+
+ cmp_eq = bpf_op == BPF_JSGT || bpf_op == BPF_JSGE;
+ if (t64s >= S16_MIN && t64s <= S16_MAX) {
+ emit_instr(ctx, slti, MIPS_R_AT, dst, (int)t64s);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+ }
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, slt, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ cmp_eq = (bpf_op == BPF_JGE);
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+	/*
+	 * Only an unsigned "set on less than" compare is available,
+	 * so use imm + 1: "GT imm" becomes "not LT imm + 1", and
+	 * "LE imm" becomes "LT imm + 1".
+	 */
+	if (bpf_op == BPF_JGT || bpf_op == BPF_JLE)
+		t64s = (u64)(u32)(insn->imm) + 1;
+	else
+		t64s = (u64)(u32)(insn->imm);
+
+ cmp_eq = bpf_op == BPF_JGT || bpf_op == BPF_JGE;
+
+ emit_const_to_reg(ctx, MIPS_R_AT, (u64)t64s);
+ emit_instr(ctx, sltu, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JSET | BPF_K: /* JMP_IMM */
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg_fp_ok);
+ if (dst < 0)
+ return dst;
+
+ if (ctx->use_bbit_insns && hweight32((u32)insn->imm) == 1) {
+ if ((insn + 1)->code == (BPF_JMP | BPF_EXIT) && insn->off == 1) {
+ b_off = b_imm(exit_idx, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit0, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ return 2; /* We consumed the exit. */
+ }
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off))
+ return -E2BIG;
+ emit_instr(ctx, bbit1, dst, ffs((u32)insn->imm) - 1, b_off);
+ emit_instr(ctx, nop);
+ break;
+ }
+ t64 = (u32)insn->imm;
+ emit_const_to_reg(ctx, MIPS_R_AT, t64);
+ emit_instr(ctx, and, MIPS_R_AT, dst, MIPS_R_AT);
+ src = MIPS_R_AT;
+ dst = MIPS_R_ZERO;
+ cmp_eq = false;
+ goto jeq_common;
+
+ case BPF_JMP | BPF_JA:
+ /*
+ * Prefer relative branch for easier debugging, but
+ * fall back if needed.
+ */
+ b_off = b_imm(this_idx + insn->off + 1, ctx);
+ if (is_bad_offset(b_off)) {
+ target = j_target(ctx, this_idx + insn->off + 1);
+ if (target == (unsigned int)-1)
+ return -E2BIG;
+ emit_instr(ctx, j, target);
+ } else {
+ emit_instr(ctx, b, b_off);
+ }
+ emit_instr(ctx, nop);
+ break;
+ case BPF_LD | BPF_DW | BPF_IMM:
+ if (insn->src_reg != 0)
+ return -EINVAL;
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ t64 = ((u64)(u32)insn->imm) | ((u64)(insn + 1)->imm << 32);
+ emit_const_to_reg(ctx, dst, t64);
+ return 2; /* Double slot insn */
+
+ case BPF_JMP | BPF_CALL:
+ ctx->flags |= EBPF_SAVE_RA;
+ t64s = (s64)insn->imm + (s64)__bpf_call_base;
+ emit_const_to_reg(ctx, MIPS_R_T9, (u64)t64s);
+ emit_instr(ctx, jalr, MIPS_R_RA, MIPS_R_T9);
+ /* delay slot */
+ emit_instr(ctx, nop);
+ break;
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(ctx, this_idx))
+ return -EINVAL;
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ td = get_reg_val_type(ctx, this_idx, insn->dst_reg);
+ if (insn->imm == 64 && td == REG_32BIT)
+ emit_instr(ctx, dinsu, dst, MIPS_R_ZERO, 32, 32);
+
+ if (insn->imm != 64 &&
+ (td == REG_64BIT || td == REG_32BIT_ZERO_EX)) {
+ /* sign extend */
+ emit_instr(ctx, sll, dst, dst, 0);
+ }
+
+#ifdef __BIG_ENDIAN
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_LE);
+#else
+ need_swap = (BPF_SRC(insn->code) == BPF_FROM_BE);
+#endif
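+	/*
+	 * wsbh swaps the bytes within each halfword; rotr by 16 then
+	 * swaps the halfwords, completing a 32-bit swap. dsbh/dshd do
+	 * the same at 64 bits.
+	 */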
+ if (insn->imm == 16) {
+ if (need_swap)
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, andi, dst, dst, 0xffff);
+ } else if (insn->imm == 32) {
+ if (need_swap) {
+ emit_instr(ctx, wsbh, dst, dst);
+ emit_instr(ctx, rotr, dst, dst, 16);
+ }
+	} else { /* 64-bit */
+ if (need_swap) {
+ emit_instr(ctx, dsbh, dst, dst);
+ emit_instr(ctx, dshd, dst, dst);
+ }
+ }
+ break;
+
+ case BPF_ST | BPF_NOSPEC: /* speculation barrier */
+ break;
+
+ case BPF_ST | BPF_B | BPF_MEM:
+ case BPF_ST | BPF_H | BPF_MEM:
+ case BPF_ST | BPF_W | BPF_MEM:
+ case BPF_ST | BPF_DW | BPF_MEM:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ gen_imm_to_reg(insn, MIPS_R_AT, ctx);
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, MIPS_R_AT, mem_off, dst);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, sd, MIPS_R_AT, mem_off, dst);
+ break;
+ }
+ break;
+
+ case BPF_LDX | BPF_B | BPF_MEM:
+ case BPF_LDX | BPF_H | BPF_MEM:
+ case BPF_LDX | BPF_W | BPF_MEM:
+ case BPF_LDX | BPF_DW | BPF_MEM:
+ if (insn->src_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ src = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ mem_off = insn->off;
+ }
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, lbu, dst, mem_off, src);
+ break;
+ case BPF_H:
+ emit_instr(ctx, lhu, dst, mem_off, src);
+ break;
+ case BPF_W:
+ emit_instr(ctx, lw, dst, mem_off, src);
+ break;
+ case BPF_DW:
+ emit_instr(ctx, ld, dst, mem_off, src);
+ break;
+ }
+ break;
+
+ case BPF_STX | BPF_B | BPF_MEM:
+ case BPF_STX | BPF_H | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_MEM:
+ case BPF_STX | BPF_DW | BPF_MEM:
+ case BPF_STX | BPF_W | BPF_XADD:
+ case BPF_STX | BPF_DW | BPF_XADD:
+ if (insn->dst_reg == BPF_REG_10) {
+ ctx->flags |= EBPF_SEEN_FP;
+ dst = MIPS_R_SP;
+ mem_off = insn->off + MAX_BPF_STACK;
+ } else {
+ dst = ebpf_to_mips_reg(ctx, insn, dst_reg);
+ if (dst < 0)
+ return dst;
+ mem_off = insn->off;
+ }
+ src = ebpf_to_mips_reg(ctx, insn, src_reg_no_fp);
+ if (src < 0)
+ return src;
+ if (BPF_MODE(insn->code) == BPF_XADD) {
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_W:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, sll, MIPS_R_AT, src, 0);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, ll, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, addu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, sc, MIPS_R_T8, mem_off, dst);
+				/*
+				 * sc leaves 1 in T8 on success, 0 on
+				 * failure; on failure branch back to
+				 * the ll (4 instructions of 4 bytes
+				 * each).
+				 */
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, lld, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, daddu, MIPS_R_T8, MIPS_R_T8, src);
+ emit_instr(ctx, scd, MIPS_R_T8, mem_off, dst);
+ emit_instr(ctx, beq, MIPS_R_T8, MIPS_R_ZERO, -4 * 4);
+ emit_instr(ctx, nop);
+ break;
+ }
+ } else { /* BPF_MEM */
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_B:
+ emit_instr(ctx, sb, src, mem_off, dst);
+ break;
+ case BPF_H:
+ emit_instr(ctx, sh, src, mem_off, dst);
+ break;
+ case BPF_W:
+ emit_instr(ctx, sw, src, mem_off, dst);
+ break;
+ case BPF_DW:
+ if (get_reg_val_type(ctx, this_idx, insn->src_reg) == REG_32BIT) {
+ emit_instr(ctx, daddu, MIPS_R_AT, src, MIPS_R_ZERO);
+ emit_instr(ctx, dinsu, MIPS_R_AT, MIPS_R_ZERO, 32, 32);
+ src = MIPS_R_AT;
+ }
+ emit_instr(ctx, sd, src, mem_off, dst);
+ break;
+ }
+ }
+ break;
+
+ default:
+ pr_err("NOT HANDLED %d - (%02x)\n",
+ this_idx, (unsigned int)insn->code);
+ return -EINVAL;
+ }
+ return 1;
+}
+
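+/*
+ * The top two bits of each reg_val_types entry record which control
+ * flow edges out of the insn have already been followed.
+ */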
+#define RVT_VISITED_MASK 0xc000000000000000ull
+#define RVT_FALL_THROUGH 0x4000000000000000ull
+#define RVT_BRANCH_TAKEN 0x8000000000000000ull
+#define RVT_DONE (RVT_FALL_THROUGH | RVT_BRANCH_TAKEN)
+
+static int build_int_body(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ int i, r;
+
+ for (i = 0; i < prog->len; ) {
+ insn = prog->insnsi + i;
+ if ((ctx->reg_val_types[i] & RVT_VISITED_MASK) == 0) {
+ /* dead instruction, don't emit it. */
+ i++;
+ continue;
+ }
+
+ if (ctx->target == NULL)
+ ctx->offsets[i] = (ctx->offsets[i] & OFFSETS_B_CONV) | (ctx->idx * 4);
+
+ r = build_one_insn(insn, ctx, i, prog->len);
+ if (r < 0)
+ return r;
+ i += r;
+ }
+ /* epilogue offset */
+ if (ctx->target == NULL)
+ ctx->offsets[i] = ctx->idx * 4;
+
+	/*
+	 * All exits branch to the epilogue; some exit offsets may not
+	 * have been set due to branch-around threading, so set them
+	 * now.
+	 */
+ if (ctx->target == NULL)
+ for (i = 0; i < prog->len; i++) {
+ insn = prog->insnsi + i;
+ if (insn->code == (BPF_JMP | BPF_EXIT))
+ ctx->offsets[i] = ctx->idx * 4;
+ }
+ return 0;
+}
+
+/* return the last idx processed, or negative for error */
+static int reg_val_propagate_range(struct jit_ctx *ctx, u64 initial_rvt,
+ int start_idx, bool follow_taken)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ const struct bpf_insn *insn;
+ u64 exit_rvt = initial_rvt;
+ u64 *rvt = ctx->reg_val_types;
+ int idx;
+ int reg;
+
+ for (idx = start_idx; idx < prog->len; idx++) {
+ rvt[idx] = (rvt[idx] & RVT_VISITED_MASK) | exit_rvt;
+ insn = prog->insnsi + idx;
+ switch (BPF_CLASS(insn->code)) {
+ case BPF_ALU:
+ switch (BPF_OP(insn->code)) {
+ case BPF_ADD:
+ case BPF_SUB:
+ case BPF_MUL:
+ case BPF_DIV:
+ case BPF_OR:
+ case BPF_AND:
+ case BPF_LSH:
+ case BPF_RSH:
+ case BPF_NEG:
+ case BPF_MOD:
+ case BPF_XOR:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ } else {
+				/* IMM to REG move */
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ }
+ break;
+ case BPF_END:
+ if (insn->imm == 64)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ else if (insn->imm == 32)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ else /* insn->imm == 16 */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_ALU64:
+ switch (BPF_OP(insn->code)) {
+ case BPF_MOV:
+ if (BPF_SRC(insn->code)) {
+				/* REG to REG move */
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ } else {
+				/* IMM to REG move */
+ if (insn->imm >= 0)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ }
+ break;
+ default:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LD:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ if (BPF_MODE(insn->code) == BPF_IMM) {
+ s64 val;
+
+ val = (s64)((u32)insn->imm | ((u64)(insn + 1)->imm << 32));
+ if (val > 0 && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ else if (val >= S32_MIN && val <= S32_MAX)
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ rvt[idx] |= RVT_DONE;
+ idx++;
+ } else {
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ }
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ if (BPF_MODE(insn->code) == BPF_IMM)
+ set_reg_val_type(&exit_rvt, insn->dst_reg,
+ insn->imm >= 0 ? REG_32BIT_POS : REG_32BIT);
+ else
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_LDX:
+ switch (BPF_SIZE(insn->code)) {
+ case BPF_DW:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_64BIT);
+ break;
+ case BPF_B:
+ case BPF_H:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT_POS);
+ break;
+ case BPF_W:
+ set_reg_val_type(&exit_rvt, insn->dst_reg, REG_32BIT);
+ break;
+ }
+ rvt[idx] |= RVT_DONE;
+ break;
+ case BPF_JMP:
+ switch (BPF_OP(insn->code)) {
+ case BPF_EXIT:
+ rvt[idx] = RVT_DONE | exit_rvt;
+ rvt[prog->len] = exit_rvt;
+ return idx;
+ case BPF_JA:
+ rvt[idx] |= RVT_DONE;
+ idx += insn->off;
+ break;
+ case BPF_JEQ:
+ case BPF_JGT:
+ case BPF_JGE:
+ case BPF_JLT:
+ case BPF_JLE:
+ case BPF_JSET:
+ case BPF_JNE:
+ case BPF_JSGT:
+ case BPF_JSGE:
+ case BPF_JSLT:
+ case BPF_JSLE:
+ if (follow_taken) {
+ rvt[idx] |= RVT_BRANCH_TAKEN;
+ idx += insn->off;
+ follow_taken = false;
+ } else {
+ rvt[idx] |= RVT_FALL_THROUGH;
+ }
+ break;
+ case BPF_CALL:
+ set_reg_val_type(&exit_rvt, BPF_REG_0, REG_64BIT);
+ /* Upon call return, argument registers are clobbered. */
+			for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ rvt[idx] |= RVT_DONE;
+ break;
+ default:
+ WARN(1, "Unhandled BPF_JMP case.\n");
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ break;
+ default:
+ rvt[idx] |= RVT_DONE;
+ break;
+ }
+ }
+ return idx;
+}
+
+/*
+ * Track the value range (i.e. 32-bit vs. 64-bit) of each register at
+ * each eBPF insn. This allows unneeded sign and zero extension
+ * operations to be omitted.
+ *
+ * Does not yet handle confluence of control paths with conflicting
+ * ranges, but it is good enough for most sane code.
+ */
+static int reg_val_propagate(struct jit_ctx *ctx)
+{
+ const struct bpf_prog *prog = ctx->skf;
+ u64 exit_rvt;
+ int reg;
+ int i;
+
+	/*
+	 * 11 registers * 3 bits/reg leaves the top bits free for other
+	 * uses. Bits 62..63 are used to record whether an insn has
+	 * been visited.
+	 */
+ exit_rvt = 0;
+
+ /* Upon entry, argument registers are 64-bit. */
+ for (reg = BPF_REG_1; reg <= BPF_REG_5; reg++)
+ set_reg_val_type(&exit_rvt, reg, REG_64BIT);
+
+ /*
+ * First follow all conditional branches on the fall-through
+	 * edge of control flow.
+ */
+ reg_val_propagate_range(ctx, exit_rvt, 0, false);
+restart_search:
+ /*
+ * Then repeatedly find the first conditional branch where
+ * both edges of control flow have not been taken, and follow
+ * the branch taken edge. We will end up restarting the
+ * search once per conditional branch insn.
+ */
+ for (i = 0; i < prog->len; i++) {
+ u64 rvt = ctx->reg_val_types[i];
+
+ if ((rvt & RVT_VISITED_MASK) == RVT_DONE ||
+ (rvt & RVT_VISITED_MASK) == 0)
+ continue;
+ if ((rvt & RVT_VISITED_MASK) == RVT_FALL_THROUGH) {
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, true);
+ } else { /* RVT_BRANCH_TAKEN */
+ WARN(1, "Unexpected RVT_BRANCH_TAKEN case.\n");
+ reg_val_propagate_range(ctx, rvt & ~RVT_VISITED_MASK, i, false);
+ }
+ goto restart_search;
+ }
+ /*
+ * Eventually all conditional branches have been followed on
+ * both branches and we are done. Any insn that has not been
+ * visited at this point is dead.
+ */
+
+ return 0;
+}
+
+static void jit_fill_hole(void *area, unsigned int size)
+{
+ u32 *p;
+
+ /* We are guaranteed to have aligned memory. */
+ for (p = area; size >= sizeof(u32); size -= sizeof(u32))
+ uasm_i_break(&p, BRK_BUG); /* Increments p */
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ struct bpf_prog *orig_prog = prog;
+ bool tmp_blinded = false;
+ struct bpf_prog *tmp;
+ struct bpf_binary_header *header = NULL;
+ struct jit_ctx ctx;
+ unsigned int image_size;
+ u8 *image_ptr;
+
+ if (!prog->jit_requested || !cpu_has_mips64r2)
+ return prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ /* If blinding was requested and we failed during blinding,
+ * we must fall back to the interpreter.
+ */
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ memset(&ctx, 0, sizeof(ctx));
+
+ preempt_disable();
+ switch (current_cpu_type()) {
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ ctx.use_bbit_insns = 1;
+ break;
+ default:
+ ctx.use_bbit_insns = 0;
+ }
+ preempt_enable();
+
+ ctx.offsets = kcalloc(prog->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
+ if (ctx.offsets == NULL)
+ goto out_err;
+
+ ctx.reg_val_types = kcalloc(prog->len + 1, sizeof(*ctx.reg_val_types), GFP_KERNEL);
+ if (ctx.reg_val_types == NULL)
+ goto out_err;
+
+ ctx.skf = prog;
+
+ if (reg_val_propagate(&ctx))
+ goto out_err;
+
+ /*
+ * First pass discovers used resources and instruction offsets
+ * assuming short branches are used.
+ */
+ if (build_int_body(&ctx))
+ goto out_err;
+
+	/*
+	 * If no calls are made (no EBPF_SAVE_RA), the tail call count
+	 * can stay in $v1, else we must save it in $s4.
+	 */
+ if (ctx.flags & EBPF_SEEN_TC) {
+ if (ctx.flags & EBPF_SAVE_RA)
+ ctx.flags |= EBPF_SAVE_S4;
+ else
+ ctx.flags |= EBPF_TCC_IN_V1;
+ }
+
+	/*
+	 * Second pass generates offsets; if any branches are out of
+	 * range, a jump-around long sequence is generated, and we have
+	 * to try again from the beginning to generate the new
+	 * offsets. This is repeated until no additional conversions
+	 * are necessary.
+	 */
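+	/*
+	 * OFFSETS_B_CONV marks are never cleared, so each pass can
+	 * only add conversions and the loop must terminate.
+	 */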
+ do {
+ ctx.idx = 0;
+ ctx.gen_b_offsets = 1;
+ ctx.long_b_conversion = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+ } while (ctx.long_b_conversion);
+
+ image_size = 4 * ctx.idx;
+
+ header = bpf_jit_binary_alloc(image_size, &image_ptr,
+ sizeof(u32), jit_fill_hole);
+ if (header == NULL)
+ goto out_err;
+
+ ctx.target = (u32 *)image_ptr;
+
+ /* Third pass generates the code */
+ ctx.idx = 0;
+ if (gen_int_prologue(&ctx))
+ goto out_err;
+ if (build_int_body(&ctx))
+ goto out_err;
+ if (build_int_epilogue(&ctx, MIPS_R_RA))
+ goto out_err;
+
+ /* Update the icache */
+ flush_icache_range((unsigned long)ctx.target,
+ (unsigned long)&ctx.target[ctx.idx]);
+
+ if (bpf_jit_enable > 1)
+ /* Dump JIT code */
+ bpf_jit_dump(prog->len, image_size, 2, ctx.target);
+
+ bpf_jit_binary_lock_ro(header);
+ prog->bpf_func = (void *)ctx.target;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+out_normal:
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ kfree(ctx.offsets);
+ kfree(ctx.reg_val_types);
+
+ return prog;
+
+out_err:
+ prog = orig_prog;
+ if (header)
+ bpf_jit_binary_free(header);
+ goto out_normal;
+}
diff --git a/arch/mips/netlogic/Kconfig b/arch/mips/netlogic/Kconfig
new file mode 100644
index 000000000..412351c5a
--- /dev/null
+++ b/arch/mips/netlogic/Kconfig
@@ -0,0 +1,86 @@
+# SPDX-License-Identifier: GPL-2.0
+if NLM_XLP_BOARD || NLM_XLR_BOARD
+
+if NLM_XLP_BOARD
+config DT_XLP_EVP
+ bool "Built-in device tree for XLP EVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP EVP boards into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_evp.dts
+
+config DT_XLP_SVP
+ bool "Built-in device tree for XLP SVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+	 Add an FDT blob for XLP SVP boards into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_svp.dts
+
+config DT_XLP_FVP
+ bool "Built-in device tree for XLP FVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP FVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_fvp.dts
+
+config DT_XLP_GVP
+ bool "Built-in device tree for XLP GVP boards"
+ default y
+ select BUILTIN_DTB
+ help
+ Add an FDT blob for XLP GVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_gvp.dts
+
+config DT_XLP_RVP
+ bool "Built-in device tree for XLP RVP boards"
+ default y
+ help
+ Add an FDT blob for XLP RVP board into the kernel.
+ This DTB will be used if the firmware does not pass in a DTB
+ pointer to the kernel. The corresponding DTS file is at
+ arch/mips/netlogic/dts/xlp_rvp.dts
+
+config NLM_MULTINODE
+ bool "Support for multi-chip boards"
+ depends on NLM_XLP_BOARD
+ default n
+ help
+ Add support for boards with 2 or 4 XLPs connected over ICI.
+
+if NLM_MULTINODE
+choice
+ prompt "Number of XLPs on the board"
+ default NLM_MULTINODE_2
+ help
+ In the multi-node case, specify the number of SoCs on the board.
+
+config NLM_MULTINODE_2
+ bool "Dual-XLP board"
+ help
+	 Support boards with up to two XLPs connected over ICI.
+
+config NLM_MULTINODE_4
+ bool "Quad-XLP board"
+ help
+	 Support boards with up to four XLPs connected over ICI.
+
+endchoice
+
+endif
+endif
+
+config NLM_COMMON
+ bool
+
+endif
diff --git a/arch/mips/netlogic/Makefile b/arch/mips/netlogic/Makefile
new file mode 100644
index 000000000..36d169b2c
--- /dev/null
+++ b/arch/mips/netlogic/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_NLM_COMMON) += common/
+obj-$(CONFIG_CPU_XLR) += xlr/
+obj-$(CONFIG_CPU_XLP) += xlp/
diff --git a/arch/mips/netlogic/Platform b/arch/mips/netlogic/Platform
new file mode 100644
index 000000000..fb8eb4c0c
--- /dev/null
+++ b/arch/mips/netlogic/Platform
@@ -0,0 +1,17 @@
+#
+# NETLOGIC includes
+#
+cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/mach-netlogic
+cflags-$(CONFIG_NLM_COMMON) += -I$(srctree)/arch/mips/include/asm/netlogic
+
+#
+# use mips64 if the compiler does not support -march=xlr
+#
+cflags-$(CONFIG_CPU_XLR) += $(call cc-option,-march=xlr,-march=mips64)
+cflags-$(CONFIG_CPU_XLP) += $(call cc-option,-march=xlp,-march=mips64r2)
+
+#
+# NETLOGIC processor support
+#
+platform-$(CONFIG_NLM_COMMON) += netlogic/
+load-$(CONFIG_NLM_COMMON) += 0xffffffff80100000
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
new file mode 100644
index 000000000..89f6e3f39
--- /dev/null
+++ b/arch/mips/netlogic/common/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += irq.o time.o
+obj-y += reset.o
+obj-$(CONFIG_SMP) += smp.o smpboot.o
+obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
new file mode 100644
index 000000000..8f5bc1597
--- /dev/null
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/serial_reg.h>
+
+#include <asm/mipsregs.h>
+#include <asm/setup.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/uart.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#endif
+
+void prom_putchar(char c)
+{
+ uint64_t uartbase;
+
+#if defined(CONFIG_CPU_XLP)
+ uartbase = nlm_get_uart_regbase(0, 0);
+#elif defined(CONFIG_CPU_XLR)
+ uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
+#endif
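+	/* Spin until the transmit holding register is empty. */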
+ while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
+ ;
+ nlm_write_reg(uartbase, UART_TX, c);
+}
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
new file mode 100644
index 000000000..f4961bc9a
--- /dev/null
+++ b/arch/mips/netlogic/common/irq.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <linux/interrupt.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/errno.h>
+#include <asm/signal.h>
+#include <asm/ptrace.h>
+#include <asm/mipsregs.h>
+#include <asm/thread_info.h>
+
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
+#else
+#error "Unknown CPU"
+#endif
+
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+ (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK 0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+ (1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+ void (*extra_ack)(struct irq_data *);
+ struct nlm_soc_info *node;
+ int picirq;
+ int irt;
+ int flags;
+};
+
+static void xlp_pic_enable(struct irq_data *d)
+{
+ unsigned long flags;
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_disable(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+ unsigned long flags;
+
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
+}
+
+static void xlp_pic_mask_ack(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ clear_c0_eimr(pd->picirq);
+ ack_c0_eirr(pd->picirq);
+}
+
+static void xlp_pic_unmask(struct irq_data *d)
+{
+ struct nlm_pic_irq *pd = irq_data_get_irq_chip_data(d);
+
+ BUG_ON(!pd);
+
+ if (pd->extra_ack)
+ pd->extra_ack(d);
+
+ /* re-enable the intr on this cpu */
+ set_c0_eimr(pd->picirq);
+
+ /* Ack is a single write, no need to lock */
+ nlm_pic_ack(pd->node->picbase, pd->irt);
+}
+
+static struct irq_chip xlp_pic = {
+ .name = "XLP-PIC",
+ .irq_enable = xlp_pic_enable,
+ .irq_disable = xlp_pic_disable,
+ .irq_mask_ack = xlp_pic_mask_ack,
+ .irq_unmask = xlp_pic_unmask,
+};
+
+static void cpuintr_disable(struct irq_data *d)
+{
+ clear_c0_eimr(d->irq);
+}
+
+static void cpuintr_enable(struct irq_data *d)
+{
+ set_c0_eimr(d->irq);
+}
+
+static void cpuintr_ack(struct irq_data *d)
+{
+ ack_c0_eirr(d->irq);
+}
+
+/*
+ * Chip definition for CPU-originated interrupts (timer, msg) and
+ * IPIs
+ */
+struct irq_chip nlm_cpu_intr = {
+ .name = "XLP-CPU-INTR",
+ .irq_enable = cpuintr_enable,
+ .irq_disable = cpuintr_disable,
+ .irq_mask = cpuintr_disable,
+ .irq_ack = cpuintr_ack,
+ .irq_eoi = cpuintr_enable,
+};
+
+static void __init nlm_init_percpu_irqs(void)
+{
+ int i;
+
+ for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
+ irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
+#ifdef CONFIG_SMP
+ irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
+ nlm_smp_function_ipi_handler);
+ irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
+ nlm_smp_resched_ipi_handler);
+#endif
+}
+
+
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+ BUG_ON(pic_data == NULL);
+ pic_data->irt = irt;
+ pic_data->picirq = picirq;
+ pic_data->node = nlm_get_node(node);
+ irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+ irq_set_chip_data(xirq, pic_data);
+}
+
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = irq_get_chip_data(xirq);
+ if (WARN_ON(!pic_data))
+ return;
+ pic_data->extra_ack = xack;
+}
+
+static void nlm_init_node_irqs(int node)
+{
+ struct nlm_soc_info *nodep;
+ int i, irt;
+
+ pr_info("Init IRQ for node %d\n", node);
+ nodep = nlm_get_node(node);
+ nodep->irqmask = PERCPU_IRQ_MASK;
+ for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+ irt = nlm_irq_to_irt(i);
+ if (irt == -1) /* unused irq */
+ continue;
+ nodep->irqmask |= 1ull << i;
+ if (irt == -2) /* not a direct PIC irq */
+ continue;
+
+ nlm_pic_init_irt(nodep->picbase, irt, i,
+ node * nlm_threads_per_node(), 0);
+ nlm_setup_pic_irq(node, i, i, irt);
+ }
+}
+
+void nlm_smp_irq_init(int hwtid)
+{
+ int cpu, node;
+
+ cpu = hwtid % nlm_threads_per_node();
+ node = hwtid / nlm_threads_per_node();
+
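+	/* The first thread of each non-boot node sets up that node's IRQs. */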
+ if (cpu == 0 && node != 0)
+ nlm_init_node_irqs(node);
+ write_c0_eimr(nlm_get_node(node)->irqmask);
+}
+
+asmlinkage void plat_irq_dispatch(void)
+{
+ uint64_t eirr;
+ int i, node;
+
+ node = nlm_nodeid();
+ eirr = read_c0_eirr_and_eimr();
+ if (eirr == 0)
+ return;
+
+ i = __ffs64(eirr);
+ /* per-CPU IRQs don't need translation */
+ if (i < PIC_IRQ_BASE) {
+ do_IRQ(i);
+ return;
+ }
+
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_CPU_XLP)
+ /* PCI interrupts need a second level dispatch for MSI bits */
+ if (i >= PIC_PCIE_LINK_MSI_IRQ(0) && i <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+ nlm_dispatch_msi(node, i);
+ return;
+ }
+ if (i >= PIC_PCIE_MSIX_IRQ(0) && i <= PIC_PCIE_MSIX_IRQ(3)) {
+ nlm_dispatch_msix(node, i);
+ return;
+ }
+
+#endif
+ /* top level irq handling */
+ do_IRQ(nlm_irq_to_xirq(node, i));
+}
+
+#ifdef CONFIG_CPU_XLP
+static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init xlp_of_pic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+ struct irq_domain *xlp_pic_domain;
+ struct resource res;
+ int socid, ret, bus;
+
+ /* we need a hack to get the PIC's SoC chip id */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("PIC %s: reg property not found!\n", node->name);
+ return -EINVAL;
+ }
+
+ if (cpu_is_xlp9xx()) {
+ bus = (res.start >> 20) & 0xf;
+ for (socid = 0; socid < NLM_NR_NODES; socid++) {
+ if (!nlm_node_present(socid))
+ continue;
+ if (nlm_get_node(socid)->socbus == bus)
+ break;
+ }
+ if (socid == NLM_NR_NODES) {
+ pr_err("PIC %s: Node mapping for bus %d not found!\n",
+ node->name, bus);
+ return -EINVAL;
+ }
+ } else {
+ socid = (res.start >> 18) & 0x3;
+ if (!nlm_node_present(socid)) {
+ pr_err("PIC %s: node %d does not exist!\n",
+ node->name, socid);
+ return -EINVAL;
+ }
+ }
+
+ xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
+ nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
+ &xlp_pic_irq_domain_ops, NULL);
+ if (xlp_pic_domain == NULL) {
+ pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
+ return -EINVAL;
+ }
+ pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
+ return 0;
+}
+
+static struct of_device_id __initdata xlp_pic_irq_ids[] = {
+ { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
+ {},
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ /* Initialize the irq descriptors */
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
+#ifdef CONFIG_CPU_XLP
+ of_irq_init(xlp_pic_irq_ids);
+#endif
+}
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
new file mode 100644
index 000000000..c474981a6
--- /dev/null
+++ b/arch/mips/netlogic/common/reset.S
@@ -0,0 +1,299 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpu.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+#define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+ XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
+ SYS_CPU_NONCOHERENT_MODE * 4
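+
+/*
+ * Note: the base above is the node 0 address of the coherent-mode
+ * register; the wakeup code below adds a per-node offset
+ * (node * 0x40000) before accessing it.
+ */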
+
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+ mtcr t1, t0
+
+ li t0, ICU_DEFEATURE
+ mfcr t1, t0
+ ori t1, 0x1000 /* Enable Icache partitioning */
+ mtcr t1, t0
+
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+ mtcr t1, t0
+.endm
+
+/*
+ * Allow access to physical memory above 64GB by setting the ELPA bit
+ * in the PAGEGRAIN register (CP0 register 5, select 1, accessed below
+ * as CP0_PAGEMASK with select 1). This is needed before going to C
+ * code, since the stack pointer can be in this region. Called from
+ * all HW threads.
+ */
+.macro xlp_early_mmu_init
+ mfc0 t0, CP0_PAGEMASK, 1
+ li t1, (1 << 29) /* ELPA bit */
+ or t0, t1
+ mtc0 t0, CP0_PAGEMASK, 1
+.endm
+
+/*
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
+ */
+.macro xlp_flush_l1_dcache
+ mfc0 t0, CP0_PRID
+ andi t0, t0, PRID_IMP_MASK
+ slt t1, t0, 0x1200
+ beqz t1, 15f
+ nop
+
+ /* XLP8xx low level cache flush */
+ li t0, LSU_DEBUG_DATA0
+ li t1, LSU_DEBUG_ADDR
+ li t2, 0 /* index */
+ li t3, 0x1000 /* loop count */
+11:
+ sll v0, t2, 5
+ mtcr zero, t0
+ ori v1, v0, 0x3 /* way0 | write_enable | write_active */
+ mtcr v1, t1
+12:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 12b
+ nop
+ mtcr zero, t0
+ ori v1, v0, 0x7 /* way1 | write_enable | write_active */
+ mtcr v1, t1
+13:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 13b
+ nop
+ addi t2, 1
+ bne t3, t2, 11b
+ nop
+ b 17f
+ nop
+
+ /* XLPII CPUs, Invalidate all 64k of L1 D-cache */
+15:
+ li t0, 0x80000000
+ li t1, 0x80010000
+16: cache Index_Writeback_Inv_D, 0(t0)
+ addiu t0, t0, 32
+ bne t0, t1, 16b
+ nop
+17:
+.endm
+
+/*
+ * nlm_reset_entry will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ *
+ * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
+ *
+ * The data corresponding to reset/NMI is stored at the RESET_DATA_PHYS
+ * location; it holds the thread mask (used when a core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
+ *
+ * When a core or thread is newly woken up, it marks itself ready and
+ * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
+ * IPI to it, with the NMI handler set to prom_boot_secondary_cpus.
+ */
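+/*
+ * A minimal C sketch of the sender side of this handshake, taken from
+ * nlm_boot_secondary() in common/smp.c (shown only to illustrate the
+ * protocol, not additional code in this file):
+ *
+ *	nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ *	nlm_next_gp = (unsigned long)task_thread_info(idle);
+ *	__sync();				(order the stores above)
+ *	nlm_pic_send_ipi(picbase, hwtid, 1, 1);	(delivered as an NMI)
+ */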
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+FEXPORT(nlm_reset_entry)
+ dmtc0 k0, $22, 6
+ dmtc0 k1, $22, 7
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
+ nop
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ ld k0, BOOT_NMI_HANDLER(k1)
+ jr k0
+ nop
+
+1: /* Entry point on core wakeup */
+ mfc0 t0, CP0_PRID /* processor ID */
+ andi t0, PRID_IMP_MASK
+ li t1, 0x1500 /* XLP 9xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ li t1, 0x1300 /* XLP 5xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ /* set bit in SYS coherent register for the core */
+ mfc0 t0, CP0_EBASE
+ mfc0 t1, CP0_EBASE
+ srl t1, 5
+ andi t1, 0x3 /* t1 <- node */
+ li t2, 0x40000
+ mul t3, t2, t1 /* t3 = node * 0x40000 */
+ srl t0, t0, 2
+ and t0, t0, 0x7 /* t0 <- core */
+ li t1, 0x1
+ sll t0, t1, t0
+ nor t0, t0, zero /* t0 <- ~(1 << core) */
+ li t2, SYS_CPU_COHERENT_BASE
+ add t2, t2, t3 /* t2 <- SYS offset for node */
+ lw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
+
+ /* read back to ensure complete */
+ lw t1, 0(t2)
+ sync
+
+2:
+ /* Configure LSU on Non-0 Cores. */
+ xlp_config_lsu
+ /* FALL THROUGH */
+
+/*
+ * Wake up sibling threads from the initial thread in a core.
+ */
+EXPORT(nlm_boot_siblings)
+ /* core L1D flush before enable threads */
+ xlp_flush_l1_dcache
+ /* save ra and sp, will be used later (only for boot cpu) */
+ dmtc0 ra, $22, 6
+ dmtc0 sp, $22, 7
+ /* Enable hw threads by writing to MAP_THREADMODE of the core */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
+ mfcr t2, t0
+ or t2, t2, t1
+ mtcr t2, t0
+
+	/*
+	 * The new hardware thread starts at the next instruction.
+	 * For all the cases other than core 0 thread 0, we will
+	 * jump to the secondary wait function.
+	 *
+	 * NOTE: All GPR contents are lost after the mtcr above!
+	 */
+ mfc0 v0, CP0_EBASE
+ andi v0, 0x3ff /* v0 <- node/core */
+
+	/*
+	 * Errata: to avoid a potential livelock, set up IFU_BRUB_RESERVE
+	 * when running 4 threads per core.
+	 */
+ andi v1, v0, 0x3 /* v1 <- thread id */
+ bnez v1, 2f
+ nop
+
+ /* thread 0 of each core. */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ subu t1, 0x3 /* 4-thread per core mode? */
+ bnez t1, 2f
+ nop
+
+ li t0, IFU_BRUB_RESERVE
+ li t1, 0x55
+ mtcr t1, t0
+ _ehb
+2:
+ beqz v0, 4f /* boot cpu (cpuid == 0)? */
+ nop
+
+ /* setup status reg */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+
+ xlp_early_mmu_init
+
+ /* mark CPU ready */
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ sll v1, v0, 2
+ PTR_ADDU t1, v1
+ li t2, 1
+ sw t2, 0(t1)
+ /* Wait until NMI hits */
+3: wait
+ b 3b
+ nop
+
+ /*
+ * For the boot CPU, we have to restore ra and sp and return, rest
+ * of the registers will be restored by the caller
+ */
+4:
+ dmfc0 ra, $22, 6
+ dmfc0 sp, $22, 7
+ jr ra
+ nop
+EXPORT(nlm_reset_entry_end)
+
+LEAF(nlm_init_boot_cpu)
+#ifdef CONFIG_CPU_XLP
+ xlp_config_lsu
+ xlp_early_mmu_init
+#endif
+ jr ra
+ nop
+END(nlm_init_boot_cpu)
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
new file mode 100644
index 000000000..39a300bd6
--- /dev/null
+++ b/arch/mips/netlogic/common/smp.c
@@ -0,0 +1,285 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/mmu_context.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+void nlm_send_ipi_single(int logical_cpu, unsigned int action)
+{
+ unsigned int hwtid;
+ uint64_t picbase;
+
+ /* node id is part of hwtid, and needed for send_ipi */
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+ if (action & SMP_CALL_FUNCTION)
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_FUNCTION, 0);
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ nlm_pic_send_ipi(picbase, hwtid, IRQ_IPI_SMP_RESCHEDULE, 0);
+}
+
+void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ nlm_send_ipi_single(cpu, action);
+ }
+}
+
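+/*
+ * Both IPI handlers below follow the same pattern: mask the IPI line
+ * in the extended interrupt mask register (EIMR), acknowledge it in
+ * the extended interrupt request register (EIRR), run the generic
+ * handler, and then unmask the line again.
+ */
+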
+/* IRQ_IPI_SMP_FUNCTION Handler */
+void nlm_smp_function_ipi_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
+ generic_smp_call_function_interrupt();
+ set_c0_eimr(irq);
+}
+
+/* IRQ_IPI_SMP_RESCHEDULE handler */
+void nlm_smp_resched_ipi_handler(struct irq_desc *desc)
+{
+ unsigned int irq = irq_desc_get_irq(desc);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
+ scheduler_ipi();
+ set_c0_eimr(irq);
+}
+
+/*
+ * Called before going into mips code, early cpu init
+ */
+void nlm_early_init_secondary(int cpu)
+{
+ change_c0_config(CONF_CM_CMASK, 0x3);
+#ifdef CONFIG_CPU_XLP
+ xlp_mmu_init();
+#endif
+ write_c0_ebase(nlm_current_node()->ebase);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void nlm_init_secondary(void)
+{
+ int hwtid;
+
+ hwtid = hard_smp_processor_id();
+ cpu_set_core(&current_cpu_data, hwtid / NLM_THREADS_PER_CORE);
+ current_cpu_data.package = nlm_nodeid();
+ nlm_percpu_init(hwtid);
+ nlm_smp_irq_init(hwtid);
+}
+
+void nlm_prepare_cpus(unsigned int max_cpus)
+{
+ /* declare we are SMT capable */
+ smp_num_siblings = nlm_threads_per_core;
+}
+
+void nlm_smp_finish(void)
+{
+ local_irq_enable();
+}
+
+/*
+ * Boot all other cpus in the system, initialize them, and bring them into
+ * the boot function
+ */
+unsigned long nlm_next_gp;
+unsigned long nlm_next_sp;
+static cpumask_t phys_cpu_present_mask;
+
+int nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
+{
+ uint64_t picbase;
+ int hwtid;
+
+ hwtid = cpu_logical_map(logical_cpu);
+ picbase = nlm_get_node(nlm_hwtid_to_node(hwtid))->picbase;
+
+ nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ nlm_next_gp = (unsigned long)task_thread_info(idle);
+
+ /* barrier for sp/gp store above */
+ __sync();
+ nlm_pic_send_ipi(picbase, hwtid, 1, 1); /* NMI */
+
+ return 0;
+}
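+
+/*
+ * Note the pairing with reset.S/smpboot.S: the woken thread takes the
+ * NMI, enters nlm_boot_secondary_cpus, and loads its stack and gp from
+ * nlm_next_sp/nlm_next_gp published above; the __sync() ensures those
+ * stores are visible before the NMI is sent.
+ */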
+
+void __init nlm_smp_setup(void)
+{
+ unsigned int boot_cpu;
+ int num_cpus, i, ncore, node;
+ volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+
+ boot_cpu = hard_smp_processor_id();
+ cpumask_clear(&phys_cpu_present_mask);
+
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+ set_cpu_possible(0, true);
+
+ num_cpus = 1;
+ for (i = 0; i < NR_CPUS; i++) {
+		/*
+		 * The cpu_ready array is not set for the boot_cpu;
+		 * it is only set for ASPs (see smpboot.S).
+		 */
+ if (cpu_ready[i]) {
+ cpumask_set_cpu(i, &phys_cpu_present_mask);
+ __cpu_number_map[i] = num_cpus;
+ __cpu_logical_map[num_cpus] = i;
+ set_cpu_possible(num_cpus, true);
+ node = nlm_hwtid_to_node(i);
+ cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
+ ++num_cpus;
+ }
+ }
+
+ pr_info("Physical CPU mask: %*pb\n",
+ cpumask_pr_args(&phys_cpu_present_mask));
+ pr_info("Possible CPU mask: %*pb\n",
+ cpumask_pr_args(cpu_possible_mask));
+
+ /* check with the cores we have woken up */
+ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+ ncore += hweight32(nlm_get_node(i)->coremask);
+
+ pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+ nlm_threads_per_core, num_cpus);
+
+ /* switch NMI handler to boot CPUs */
+ nlm_set_nmi_handler(nlm_boot_secondary_cpus);
+}
+
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
+{
+ uint32_t core0_thr_mask, core_thr_mask;
+ int threadmode, i, j;
+
+ core0_thr_mask = 0;
+ for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+ if (cpumask_test_cpu(i, wakeup_mask))
+ core0_thr_mask |= (1 << i);
+ switch (core0_thr_mask) {
+ case 1:
+ nlm_threads_per_core = 1;
+ threadmode = 0;
+ break;
+ case 3:
+ nlm_threads_per_core = 2;
+ threadmode = 2;
+ break;
+ case 0xf:
+ nlm_threads_per_core = 4;
+ threadmode = 3;
+ break;
+ default:
+ goto unsupp;
+ }
+
+ /* Verify other cores CPU masks */
+ for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+ core_thr_mask = 0;
+ for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+ if (cpumask_test_cpu(i + j, wakeup_mask))
+ core_thr_mask |= (1 << j);
+ if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
+ goto unsupp;
+ }
+ return threadmode;
+
+unsupp:
+ panic("Unsupported CPU mask %*pb", cpumask_pr_args(wakeup_mask));
+ return 0;
+}
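+
+/*
+ * Worked example: waking all four threads of every core gives
+ * core0_thr_mask == 0xf, so nlm_threads_per_core is set to 4 and
+ * threadmode 3 is returned. The threadmode value is stored in the
+ * reset data area (see nlm_wakeup_secondary_cpus below) and programmed
+ * into MAP_THREADMODE by nlm_boot_siblings in reset.S.
+ */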
+
+int nlm_wakeup_secondary_cpus(void)
+{
+ u32 *reset_data;
+ int threadmode;
+
+ /* verify the mask and setup core config variables */
+ threadmode = nlm_parse_cpumask(&nlm_cpumask);
+
+ /* Setup CPU init parameters */
+ reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
+ *reset_data = threadmode;
+
+#ifdef CONFIG_CPU_XLP
+ xlp_wakeup_secondary_cpus();
+#else
+ xlr_wakeup_secondary_cpus();
+#endif
+ return 0;
+}
+
+const struct plat_smp_ops nlm_smp_ops = {
+ .send_ipi_single = nlm_send_ipi_single,
+ .send_ipi_mask = nlm_send_ipi_mask,
+ .init_secondary = nlm_init_secondary,
+ .smp_finish = nlm_smp_finish,
+ .boot_secondary = nlm_boot_secondary,
+ .smp_setup = nlm_smp_setup,
+ .prepare_cpus = nlm_prepare_cpus,
+};
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
new file mode 100644
index 000000000..509c1a7e7
--- /dev/null
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+/* Called by the boot cpu to wake up its sibling threads */
+NESTED(xlp_boot_core0_siblings, PT_SIZE, sp)
+ /* CPU register contents lost when enabling threads, save them first */
+ SAVE_ALL
+ sync
+ /* find the location to which nlm_boot_siblings was relocated */
+ li t0, CKSEG1ADDR(RESET_VEC_PHYS)
+ PTR_LA t1, nlm_reset_entry
+ PTR_LA t2, nlm_boot_siblings
+ dsubu t2, t1
+ daddu t2, t0
+ /* call it */
+ jalr t2
+ nop
+ RESTORE_ALL
+ jr ra
+ nop
+END(xlp_boot_core0_siblings)
+
+NESTED(nlm_boot_secondary_cpus, 16, sp)
+ /* Initialize CP0 Status */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+ PTR_LA t1, nlm_next_sp
+ PTR_L sp, 0(t1)
+ PTR_LA t1, nlm_next_gp
+ PTR_L gp, 0(t1)
+
+ /* a0 has the processor id */
+ mfc0 a0, CP0_EBASE
+ andi a0, 0x3ff /* a0 <- node/core */
+ PTR_LA t0, nlm_early_init_secondary
+ jalr t0
+ nop
+
+ PTR_LA t0, smp_bootstrap
+ jr t0
+ nop
+END(nlm_boot_secondary_cpus)
+
+/*
+ * With the RMIboot bootloader used on XLR boards, the CPUs are
+ * already woken up and waiting in bootloader code.
+ * This gets them out of the bootloader code and into Linux. This is
+ * needed because the bootloader area will be taken over and
+ * initialized by Linux.
+ */
+NESTED(nlm_rmiboot_preboot, 16, sp)
+ mfc0 t0, $15, 1 /* read ebase */
+ andi t0, 0x1f /* t0 has the processor_id() */
+ andi t2, t0, 0x3 /* thread num */
+ sll t0, 2 /* offset in cpu array */
+
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ ADDU t1, t0
+ li t3, 1
+ sw t3, 0(t1)
+
+ bnez t2, 1f /* skip thread programming */
+ nop /* for thread id != 0 */
+
+ /*
+ * XLR MMU setup only for first thread in core
+ */
+ li t0, 0x400
+ mfcr t1, t0
+ li t2, 6 /* XLR thread mode mask */
+ nor t3, t2, zero
+ and t2, t1, t2 /* t2 - current thread mode */
+ li v0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
+ sll v1, 1
+ beq v1, t2, 1f /* same as request value */
+ nop /* nothing to do */
+
+ and t2, t1, t3 /* mask out old thread mode */
+ or t1, t2, v1 /* put in new value */
+ mtcr t1, t0 /* update core control */
+
+ /* wait for NMI to hit */
+1: wait
+ b 1b
+ nop
+END(nlm_rmiboot_preboot)
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
new file mode 100644
index 000000000..cbbf0d482
--- /dev/null
+++ b/arch/mips/netlogic/common/time.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/init.h>
+
+#include <asm/time.h>
+#include <asm/cpu-features.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+unsigned int get_c0_compare_int(void)
+{
+ return IRQ_TIMER;
+}
+
+static u64 nlm_get_pic_timer(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
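+
+/*
+ * The PIC timer is programmed to count down from ~0ULL (see
+ * nlm_init_pic_timer below), so the bitwise complement taken in the
+ * two read functions above yields the up-counting value the
+ * clocksource core expects.
+ */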
+
+static struct clocksource csrc_pic = {
+ .name = "PIC",
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+ u32 picfreq;
+
+ nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+ if (current_cpu_data.cputype == CPU_XLR) {
+ csrc_pic.mask = CLOCKSOURCE_MASK(32);
+ csrc_pic.read = nlm_get_pic_timer32;
+ } else {
+ csrc_pic.mask = CLOCKSOURCE_MASK(64);
+ csrc_pic.read = nlm_get_pic_timer;
+ }
+ csrc_pic.rating = 1000;
+ picfreq = pic_timer_freq();
+ clocksource_register_hz(&csrc_pic, picfreq);
+ pr_info("PIC clock source added, frequency %d\n", picfreq);
+}
+
+void __init plat_time_init(void)
+{
+ nlm_init_pic_timer();
+ mips_hpt_frequency = nlm_get_cpu_frequency();
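+	/*
+	 * Pre-set loops-per-jiffy from the counter frequency; the CP0
+	 * count appears to tick at clock/3 on XLR and clock/2 on other
+	 * chips, hence the divisors below (inferred from the code, not
+	 * documented here).
+	 */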
+ if (current_cpu_type() == CPU_XLR)
+ preset_lpj = mips_hpt_frequency / (3 * HZ);
+ else
+ preset_lpj = mips_hpt_frequency / (2 * HZ);
+	pr_info("MIPS counter frequency [%lu]\n",
+			(unsigned long)mips_hpt_frequency);
+}
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
new file mode 100644
index 000000000..d62465717
--- /dev/null
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
+obj-$(CONFIG_SMP) += wakeup.o
+ifdef CONFIG_USB
+obj-y += usb-init.o
+obj-y += usb-init-xlp2.o
+endif
+ifdef CONFIG_SATA_AHCI
+obj-y += ahci-init.o
+obj-y += ahci-init-xlp2.o
+endif
diff --git a/arch/mips/netlogic/xlp/ahci-init-xlp2.c b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
new file mode 100644
index 000000000..c11b9c7dc
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init-xlp2.c
@@ -0,0 +1,390 @@
+/*
+ * Copyright (c) 2003-2014 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+#include <linux/pci_ids.h>
+#include <linux/nodemask.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/mips-extns.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+
+#define SATA_CTL 0x0
+#define SATA_STATUS 0x1 /* Status Reg */
+#define SATA_INT 0x2 /* Interrupt Reg */
+#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
+#define SATA_BIU_TIMEOUT 0x4
+#define AXIWRSPERRLOG 0x5
+#define AXIRDSPERRLOG 0x6
+#define BiuTimeoutLow 0x7
+#define BiuTimeoutHi 0x8
+#define BiuSlvErLow 0x9
+#define BiuSlvErHi 0xa
+#define IO_CONFIG_SWAP_DIS 0xb
+#define CR_REG_TIMER 0xc
+#define CORE_ID 0xd
+#define AXI_SLAVE_OPT1 0xe
+#define PHY_MEM_ACCESS 0xf
+#define PHY0_CNTRL 0x10
+#define PHY0_STAT 0x11
+#define PHY0_RX_ALIGN 0x12
+#define PHY0_RX_EQ_LO 0x13
+#define PHY0_RX_EQ_HI 0x14
+#define PHY0_BIST_LOOP 0x15
+#define PHY1_CNTRL 0x16
+#define PHY1_STAT 0x17
+#define PHY1_RX_ALIGN 0x18
+#define PHY1_RX_EQ_LO 0x19
+#define PHY1_RX_EQ_HI 0x1a
+#define PHY1_BIST_LOOP 0x1b
+#define RdExBase 0x1c
+#define RdExLimit 0x1d
+#define CacheAllocBase 0x1e
+#define CacheAllocLimit 0x1f
+#define BiuSlaveCmdGstNum 0x20
+
+/* SATA_CTL Bits */
+#define SATA_RST_N BIT(0) /* Active low reset sata_core phy */
+#define SataCtlReserve0 BIT(1)
+#define M_CSYSREQ BIT(2) /* AXI master low power, not used */
+#define S_CSYSREQ BIT(3) /* AXI slave low power, not used */
+#define P0_CP_DET BIT(8) /* Reserved, bring in from pad */
+#define P0_MP_SW BIT(9) /* Mech Switch */
+#define P0_DISABLE BIT(10) /* disable p0 */
+#define P0_ACT_LED_EN BIT(11) /* Active LED enable */
+#define P0_IRST_HARD_SYNTH BIT(12) /* PHY hard synth reset */
+#define P0_IRST_HARD_TXRX BIT(13) /* PHY lane hard reset */
+#define P0_IRST_POR BIT(14) /* PHY power on reset*/
+#define P0_IPDTXL BIT(15) /* PHY Tx lane dis/power down */
+#define P0_IPDRXL BIT(16) /* PHY Rx lane dis/power down */
+#define P0_IPDIPDMSYNTH		BIT(17)	/* PHY synthesizer dis/power down */
+#define P0_CP_POD_EN BIT(18) /* CP_POD enable */
+#define P0_AT_BYPASS BIT(19) /* P0 address translation by pass */
+#define P1_CP_DET		BIT(20)	/* Reserved, Cold Detect */
+#define P1_MP_SW BIT(21) /* Mech Switch */
+#define P1_DISABLE BIT(22) /* disable p1 */
+#define P1_ACT_LED_EN BIT(23) /* Active LED enable */
+#define P1_IRST_HARD_SYNTH BIT(24) /* PHY hard synth reset */
+#define P1_IRST_HARD_TXRX BIT(25) /* PHY lane hard reset */
+#define P1_IRST_POR BIT(26) /* PHY power on reset*/
+#define P1_IPDTXL		BIT(27)	/* PHY Tx lane dis/power down */
+#define P1_IPDRXL		BIT(28)	/* PHY Rx lane dis/power down */
+#define P1_IPDIPDMSYNTH		BIT(29)	/* PHY synthesizer dis/power down */
+#define P1_CP_POD_EN BIT(30)
+#define P1_AT_BYPASS BIT(31) /* P1 address translation by pass */
+
+/* Status register */
+#define M_CACTIVE BIT(0) /* m_cactive, not used */
+#define S_CACTIVE BIT(1) /* s_cactive, not used */
+#define P0_PHY_READY BIT(8) /* phy is ready */
+#define P0_CP_POD BIT(9) /* Cold PowerOn */
+#define P0_SLUMBER BIT(10) /* power mode slumber */
+#define P0_PATIAL		BIT(11)	/* power mode partial */
+#define P0_PHY_SIG_DET		BIT(12)	/* phy signal detect */
+#define P0_PHY_CALI BIT(13) /* phy calibration done */
+#define P1_PHY_READY BIT(16) /* phy is ready */
+#define P1_CP_POD BIT(17) /* Cold PowerOn */
+#define P1_SLUMBER BIT(18) /* power mode slumber */
+#define P1_PATIAL		BIT(19)	/* power mode partial */
+#define P1_PHY_SIG_DET		BIT(20)	/* phy signal detect */
+#define P1_PHY_CALI BIT(21) /* phy calibration done */
+
+/* SATA CR_REG_TIMER bits */
+#define CR_TIME_SCALE (0x1000 << 0)
+
+/* SATA PHY specific registers start and end address */
+#define RXCDRCALFOSC0 0x0065
+#define CALDUTY 0x006e
+#define RXDPIF 0x8065
+#define PPMDRIFTMAX_HI 0x80A4
+
+#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sata_pcibase(node) \
+ nlm_pcicfg_base(XLP9XX_IO_SATA_OFFSET(node))
+#define nlm_get_sata_regbase(node) \
+ (nlm_get_sata_pcibase(node) + 0x100)
+
+/* SATA PHY config for register block 1 0x0065 .. 0x006e */
+static const u8 sata_phy_config1[] = {
+ 0xC9, 0xC9, 0x07, 0x07, 0x18, 0x18, 0x01, 0x01, 0x22, 0x00
+};
+
+/* SATA PHY config for register block 2 0x8065 .. 0x80A4 */
+static const u8 sata_phy_config2[] = {
+ 0xAA, 0x00, 0x4C, 0xC9, 0xC9, 0x07, 0x07, 0x18,
+ 0x18, 0x05, 0x0C, 0x10, 0x00, 0x10, 0x00, 0xFF,
+ 0xCF, 0xF7, 0xE1, 0xF5, 0xFD, 0xFD, 0xFF, 0xFF,
+ 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5, 0xFD, 0xFD,
+ 0xF5, 0xF5, 0xFF, 0xFF, 0xE3, 0xE7, 0xDB, 0xF5,
+ 0xFD, 0xFD, 0xF5, 0xF5, 0xFF, 0xFF, 0xFF, 0xF5,
+ 0x3F, 0x00, 0x32, 0x00, 0x03, 0x01, 0x05, 0x05,
+ 0x04, 0x00, 0x00, 0x08, 0x04, 0x00, 0x00, 0x04,
+};
+
+const int sata_phy_debug = 0; /* set to verify PHY writes */
+
+static void sata_clear_glue_reg(u64 regbase, u32 off, u32 bit)
+{
+ u32 reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
+}
+
+static void sata_set_glue_reg(u64 regbase, u32 off, u32 bit)
+{
+ u32 reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val | bit));
+}
+
+static void write_phy_reg(u64 regbase, u32 addr, u32 physel, u8 data)
+{
+ nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
+ (1u << 31) | (physel << 24) | (data << 16) | addr);
+ udelay(850);
+}
+
+static u8 read_phy_reg(u64 regbase, u32 addr, u32 physel)
+{
+ u32 val;
+
+ nlm_write_sata_reg(regbase, PHY_MEM_ACCESS,
+ (0 << 31) | (physel << 24) | (0 << 16) | addr);
+ udelay(850);
+ val = nlm_read_sata_reg(regbase, PHY_MEM_ACCESS);
+ return (val >> 16) & 0xff;
+}
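+
+/*
+ * The two helpers above implement the indirect PHY register access
+ * protocol used through PHY_MEM_ACCESS: bit 31 selects write vs read,
+ * bits 24 and up select the PHY (port), bits 16-23 carry the write
+ * data (or return the read data), and the low bits carry the PHY
+ * register address.
+ */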
+
+static void config_sata_phy(u64 regbase)
+{
+ u32 port, i, reg;
+ u8 val;
+
+ for (port = 0; port < 2; port++) {
+ for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
+ write_phy_reg(regbase, reg, port, sata_phy_config1[i]);
+
+ for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
+ write_phy_reg(regbase, reg, port, sata_phy_config2[i]);
+
+ /* Fix for PHY link up failures at lower temperatures */
+ write_phy_reg(regbase, 0x800F, port, 0x1f);
+
+ val = read_phy_reg(regbase, 0x0029, port);
+ write_phy_reg(regbase, 0x0029, port, val | (0x7 << 1));
+
+ val = read_phy_reg(regbase, 0x0056, port);
+ write_phy_reg(regbase, 0x0056, port, val & ~(1 << 3));
+
+ val = read_phy_reg(regbase, 0x0018, port);
+ write_phy_reg(regbase, 0x0018, port, val & ~(0x7 << 0));
+ }
+}
+
+static void check_phy_register(u64 regbase, u32 addr, u32 physel, u8 xdata)
+{
+ u8 data;
+
+ data = read_phy_reg(regbase, addr, physel);
+ pr_info("PHY read addr = 0x%x physel = %d data = 0x%x %s\n",
+ addr, physel, data, data == xdata ? "TRUE" : "FALSE");
+}
+
+static void verify_sata_phy_config(u64 regbase)
+{
+ u32 port, i, reg;
+
+ for (port = 0; port < 2; port++) {
+ for (i = 0, reg = RXCDRCALFOSC0; reg <= CALDUTY; reg++, i++)
+ check_phy_register(regbase, reg, port,
+ sata_phy_config1[i]);
+
+ for (i = 0, reg = RXDPIF; reg <= PPMDRIFTMAX_HI; reg++, i++)
+ check_phy_register(regbase, reg, port,
+ sata_phy_config2[i]);
+ }
+}
+
+static void nlm_sata_firmware_init(int node)
+{
+ u32 reg_val;
+ u64 regbase;
+ int n;
+
+ pr_info("Initializing XLP9XX On-chip AHCI...\n");
+ regbase = nlm_get_sata_regbase(node);
+
+ /* Reset port0 */
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
+
+ /* port1 */
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
+ sata_clear_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
+ udelay(300);
+
+ /* Set PHY */
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDTXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDRXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IPDIPDMSYNTH);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDTXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDRXL);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IPDIPDMSYNTH);
+
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_POR);
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_POR);
+ udelay(1000);
+
+ /* setup PHY */
+ config_sata_phy(regbase);
+ if (sata_phy_debug)
+ verify_sata_phy_config(regbase);
+
+ udelay(1000);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_TXRX);
+ sata_set_glue_reg(regbase, SATA_CTL, P0_IRST_HARD_SYNTH);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_TXRX);
+ sata_set_glue_reg(regbase, SATA_CTL, P1_IRST_HARD_SYNTH);
+ udelay(300);
+
+ /* Override reset in serial PHY mode */
+ sata_set_glue_reg(regbase, CR_REG_TIMER, CR_TIME_SCALE);
+ /* Set reset SATA */
+ sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ sata_set_glue_reg(regbase, SATA_CTL, M_CSYSREQ);
+ sata_set_glue_reg(regbase, SATA_CTL, S_CSYSREQ);
+
+ pr_debug("Waiting for PHYs to come up.\n");
+ n = 10000;
+ do {
+ reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
+ if ((reg_val & P1_PHY_READY) && (reg_val & P0_PHY_READY))
+ break;
+ udelay(10);
+ } while (--n > 0);
+
+ if (reg_val & P0_PHY_READY)
+ pr_info("PHY0 is up.\n");
+ else
+ pr_info("PHY0 is down.\n");
+ if (reg_val & P1_PHY_READY)
+ pr_info("PHY1 is up.\n");
+ else
+ pr_info("PHY1 is down.\n");
+
+ pr_info("XLP AHCI Init Done.\n");
+}
+
+static int __init nlm_ahci_init(void)
+{
+ int node;
+
+ if (!cpu_is_xlp9xx())
+ return 0;
+ for (node = 0; node < NLM_NR_NODES; node++)
+ if (nlm_node_present(node))
+ nlm_sata_firmware_init(node);
+ return 0;
+}
+
+static void nlm_sata_intr_ack(struct irq_data *data)
+{
+ u64 regbase;
+ u32 val;
+ int node;
+
+ node = data->irq / NLM_IRQS_PER_NODE;
+ regbase = nlm_get_sata_regbase(node);
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+}
+
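+/*
+ * As on XLP3XX (see ahci-init.c), the SoC exposes the AHCI register
+ * space in BAR 0, while the AHCI spec expects the ABAR in BAR 5;
+ * move the resource accordingly.
+ */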
+static void nlm_sata_fixup_bar(struct pci_dev *dev)
+{
+ dev->resource[5] = dev->resource[0];
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+}
+
+static void nlm_sata_fixup_final(struct pci_dev *dev)
+{
+ u32 val;
+ u64 regbase;
+ int node;
+
+ /* Find end bridge function to find node */
+ node = xlp_socdev_to_node(dev);
+ regbase = nlm_get_sata_regbase(node);
+
+ /* clear pending interrupts and then enable them */
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+
+ /* Enable only the core interrupt */
+ sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
+
+ dev->irq = nlm_irq_to_xirq(node, PIC_SATA_IRQ);
+ nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
+}
+
+arch_initcall(nlm_ahci_init);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
+ nlm_sata_fixup_bar);
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_SATA,
+ nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/ahci-init.c b/arch/mips/netlogic/xlp/ahci-init.c
new file mode 100644
index 000000000..92be1a325
--- /dev/null
+++ b/arch/mips/netlogic/xlp/ahci-init.c
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2003-2014 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/bitops.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/mips-extns.h>
+
+#define SATA_CTL 0x0
+#define SATA_STATUS 0x1 /* Status Reg */
+#define SATA_INT 0x2 /* Interrupt Reg */
+#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
+#define SATA_CR_REG_TIMER	0x4	/* PHY Control Timer Reg */
+#define SATA_CORE_ID 0x5 /* Core ID Reg */
+#define SATA_AXI_SLAVE_OPT1 0x6 /* AXI Slave Options Reg */
+#define SATA_PHY_LOS_LEV 0x7 /* PHY LOS Level Reg */
+#define SATA_PHY_MULTI 0x8 /* PHY Multiplier Reg */
+#define SATA_PHY_CLK_SEL 0x9 /* Clock Select Reg */
+#define SATA_PHY_AMP1_GEN1 0xa /* PHY Transmit Amplitude Reg 1 */
+#define SATA_PHY_AMP1_GEN2 0xb /* PHY Transmit Amplitude Reg 2 */
+#define SATA_PHY_AMP1_GEN3 0xc /* PHY Transmit Amplitude Reg 3 */
+#define SATA_PHY_PRE1 0xd /* PHY Transmit Preemphasis Reg 1 */
+#define SATA_PHY_PRE2 0xe /* PHY Transmit Preemphasis Reg 2 */
+#define SATA_PHY_PRE3 0xf /* PHY Transmit Preemphasis Reg 3 */
+#define SATA_SPDMODE 0x10 /* Speed Mode Reg */
+#define SATA_REFCLK 0x11 /* Reference Clock Control Reg */
+#define SATA_BYTE_SWAP_DIS 0x12 /* byte swap disable */
+
+/* SATA_CTL Bits */
+#define SATA_RST_N BIT(0)
+#define PHY0_RESET_N BIT(16)
+#define PHY1_RESET_N BIT(17)
+#define PHY2_RESET_N BIT(18)
+#define PHY3_RESET_N BIT(19)
+#define M_CSYSREQ BIT(2)
+#define S_CSYSREQ BIT(3)
+
+/* SATA_STATUS Bits */
+#define P0_PHY_READY BIT(4)
+#define P1_PHY_READY BIT(5)
+#define P2_PHY_READY BIT(6)
+#define P3_PHY_READY BIT(7)
+
+#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_sata_pcibase(node) \
+ nlm_pcicfg_base(XLP_IO_SATA_OFFSET(node))
+/* SATA device specific configuration registers start at offset 0x900 */
+#define nlm_get_sata_regbase(node) \
+ (nlm_get_sata_pcibase(node) + 0x900)
+
+static void sata_clear_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
+{
+ uint32_t reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val & ~bit));
+}
+
+static void sata_set_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
+{
+ uint32_t reg_val;
+
+ reg_val = nlm_read_sata_reg(regbase, off);
+ nlm_write_sata_reg(regbase, off, (reg_val | bit));
+}
+
+static void nlm_sata_firmware_init(int node)
+{
+ uint32_t reg_val;
+ uint64_t regbase;
+ int i;
+
+ pr_info("XLP AHCI Initialization started.\n");
+ regbase = nlm_get_sata_regbase(node);
+
+ /* Reset SATA */
+ sata_clear_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ /* Reset PHY */
+ sata_clear_glue_reg(regbase, SATA_CTL,
+ (PHY3_RESET_N | PHY2_RESET_N
+ | PHY1_RESET_N | PHY0_RESET_N));
+
+ /* Set SATA */
+ sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
+ /* Set PHY */
+ sata_set_glue_reg(regbase, SATA_CTL,
+ (PHY3_RESET_N | PHY2_RESET_N
+ | PHY1_RESET_N | PHY0_RESET_N));
+
+ pr_debug("Waiting for PHYs to come up.\n");
+ i = 0;
+ do {
+ reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
+ i++;
+ } while (((reg_val & 0xF0) != 0xF0) && (i < 10000));
+
+ for (i = 0; i < 4; i++) {
+ if (reg_val & (P0_PHY_READY << i))
+ pr_info("PHY%d is up.\n", i);
+ else
+ pr_info("PHY%d is down.\n", i);
+ }
+
+ pr_info("XLP AHCI init done.\n");
+}
+
+static int __init nlm_ahci_init(void)
+{
+ int node = 0;
+ int chip = read_c0_prid() & PRID_IMP_MASK;
+
+ if (chip == PRID_IMP_NETLOGIC_XLP3XX)
+ nlm_sata_firmware_init(node);
+ return 0;
+}
+
+static void nlm_sata_intr_ack(struct irq_data *data)
+{
+ uint32_t val = 0;
+ uint64_t regbase;
+
+ regbase = nlm_get_sata_regbase(nlm_nodeid());
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+}
+
+static void nlm_sata_fixup_bar(struct pci_dev *dev)
+{
+ /*
+ * The AHCI resource is in BAR 0, move it to
+ * BAR 5, where it is expected
+ */
+ dev->resource[5] = dev->resource[0];
+ memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
+}
+
+static void nlm_sata_fixup_final(struct pci_dev *dev)
+{
+ uint32_t val;
+ uint64_t regbase;
+ int node = 0; /* XLP3XX does not support multi-node */
+
+ regbase = nlm_get_sata_regbase(node);
+
+ /* clear pending interrupts and then enable them */
+ val = nlm_read_sata_reg(regbase, SATA_INT);
+ sata_set_glue_reg(regbase, SATA_INT, val);
+
+	/*
+	 * Enable only the core interrupt. If all the interrupts are
+	 * enabled, spurious interrupts keep occurring; to avoid that,
+	 * only the core interrupt is enabled in the mask.
+	 */
+ sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);
+
+ dev->irq = PIC_SATA_IRQ;
+ nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
+}
+
+arch_initcall(nlm_ahci_init);
+
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
+ nlm_sata_fixup_bar);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
+ nlm_sata_fixup_final);
diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c
new file mode 100644
index 000000000..21e439b3d
--- /dev/null
+++ b/arch/mips/netlogic/xlp/cop2-ex.c
@@ -0,0 +1,121 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Broadcom Corporation.
+ *
+ * based on arch/mips/cavium-octeon/cpu.c
+ * Copyright (C) 2009 Wind River Systems,
+ * written by Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/capability.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/notifier.h>
+#include <linux/prefetch.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cop2.h>
+#include <asm/current.h>
+#include <asm/mipsregs.h>
+#include <asm/page.h>
+
+#include <asm/netlogic/mips-extns.h>
+
+/*
+ * 64 bit ops are done in inline assembly to support 32 bit
+ * compilation
+ */
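+/* $1 ($at) is used as a scratch register below, hence ".set noat" */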
+void nlm_cop2_save(struct nlm_cop2_state *r)
+{
+ asm volatile(
+ ".set push\n"
+ ".set noat\n"
+ "dmfc2 $1, $0, 0\n"
+ "sd $1, 0(%1)\n"
+ "dmfc2 $1, $0, 1\n"
+ "sd $1, 8(%1)\n"
+ "dmfc2 $1, $0, 2\n"
+ "sd $1, 16(%1)\n"
+ "dmfc2 $1, $0, 3\n"
+ "sd $1, 24(%1)\n"
+ "dmfc2 $1, $1, 0\n"
+ "sd $1, 0(%2)\n"
+ "dmfc2 $1, $1, 1\n"
+ "sd $1, 8(%2)\n"
+ "dmfc2 $1, $1, 2\n"
+ "sd $1, 16(%2)\n"
+ "dmfc2 $1, $1, 3\n"
+ "sd $1, 24(%2)\n"
+ ".set pop\n"
+ : "=m"(*r)
+ : "r"(r->tx), "r"(r->rx));
+
+ r->tx_msg_status = __read_32bit_c2_register($2, 0);
+ r->rx_msg_status = __read_32bit_c2_register($3, 0) & 0x0fffffff;
+}
+
+void nlm_cop2_restore(struct nlm_cop2_state *r)
+{
+ u32 rstat;
+
+ asm volatile(
+ ".set push\n"
+ ".set noat\n"
+ "ld $1, 0(%1)\n"
+ "dmtc2 $1, $0, 0\n"
+ "ld $1, 8(%1)\n"
+ "dmtc2 $1, $0, 1\n"
+ "ld $1, 16(%1)\n"
+ "dmtc2 $1, $0, 2\n"
+ "ld $1, 24(%1)\n"
+ "dmtc2 $1, $0, 3\n"
+ "ld $1, 0(%2)\n"
+ "dmtc2 $1, $1, 0\n"
+ "ld $1, 8(%2)\n"
+ "dmtc2 $1, $1, 1\n"
+ "ld $1, 16(%2)\n"
+ "dmtc2 $1, $1, 2\n"
+ "ld $1, 24(%2)\n"
+ "dmtc2 $1, $1, 3\n"
+ ".set pop\n"
+ : : "m"(*r), "r"(r->tx), "r"(r->rx));
+
+ __write_32bit_c2_register($2, 0, r->tx_msg_status);
+ rstat = __read_32bit_c2_register($3, 0) & 0xf0000000u;
+ __write_32bit_c2_register($3, 0, r->rx_msg_status | rstat);
+}
+
+static int nlm_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ unsigned long flags;
+ unsigned int status;
+
+ switch (action) {
+ case CU2_EXCEPTION:
+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
+ break;
+ local_irq_save(flags);
+ KSTK_STATUS(current) |= ST0_CU2;
+ status = read_c0_status();
+ write_c0_status(status | ST0_CU2);
+ nlm_cop2_restore(&(current->thread.cp2));
+ write_c0_status(status & ~ST0_CU2);
+ local_irq_restore(flags);
+ pr_info("COP2 access enabled for pid %d (%s)\n",
+ current->pid, current->comm);
+ return NOTIFY_BAD; /* Don't call default notifier */
+ }
+
+ return NOTIFY_OK; /* Let default notifier send signals */
+}
+
+static int __init nlm_cu2_setup(void)
+{
+ return cu2_notifier(nlm_cu2_call, 0);
+}
+early_initcall(nlm_cu2_setup);
diff --git a/arch/mips/netlogic/xlp/dt.c b/arch/mips/netlogic/xlp/dt.c
new file mode 100644
index 000000000..b5ba83f4c
--- /dev/null
+++ b/arch/mips/netlogic/xlp/dt.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/of_device.h>
+
+#include <asm/prom.h>
+
+extern u32 __dtb_xlp_evp_begin[], __dtb_xlp_svp_begin[], __dtb_xlp_fvp_begin[],
+ __dtb_xlp_gvp_begin[], __dtb_xlp_rvp_begin[];
+static void *xlp_fdt_blob;
+
+void __init *xlp_dt_init(void *fdtp)
+{
+ if (!fdtp) {
+ switch (current_cpu_data.processor_id & PRID_IMP_MASK) {
+#ifdef CONFIG_DT_XLP_RVP
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ fdtp = __dtb_xlp_rvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_GVP
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ fdtp = __dtb_xlp_gvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_FVP
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ fdtp = __dtb_xlp_fvp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_SVP
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ fdtp = __dtb_xlp_svp_begin;
+ break;
+#endif
+#ifdef CONFIG_DT_XLP_EVP
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ fdtp = __dtb_xlp_evp_begin;
+ break;
+#endif
+ default:
+ /* Pick a built-in if any, and hope for the best */
+ fdtp = __dtb_start;
+ break;
+ }
+ }
+ xlp_fdt_blob = fdtp;
+ return fdtp;
+}
+
+void __init xlp_early_init_devtree(void)
+{
+ __dt_setup_arch(xlp_fdt_blob);
+}
+
+void __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
new file mode 100644
index 000000000..25ee69489
--- /dev/null
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -0,0 +1,508 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+
+#include <asm/mipsregs.h>
+#include <asm/time.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/bridge.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+/* Main initialization */
+void nlm_node_init(int node)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_get_node(node);
+ if (node == 0)
+ nodep->coremask = 1; /* node 0, boot cpu */
+ nodep->sysbase = nlm_get_sys_regbase(node);
+ nodep->picbase = nlm_get_pic_regbase(node);
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
+ if (cpu_is_xlp9xx())
+ nodep->socbus = xlp9xx_get_socbus(node);
+ else
+ nodep->socbus = 0;
+ spin_lock_init(&nodep->piclock);
+}
+
+static int xlp9xx_irq_to_irt(int irq)
+{
+ switch (irq) {
+ case PIC_GPIO_IRQ:
+ return 12;
+ case PIC_I2C_0_IRQ:
+ return 125;
+ case PIC_I2C_1_IRQ:
+ return 126;
+ case PIC_I2C_2_IRQ:
+ return 127;
+ case PIC_I2C_3_IRQ:
+ return 128;
+ case PIC_9XX_XHCI_0_IRQ:
+ return 114;
+ case PIC_9XX_XHCI_1_IRQ:
+ return 115;
+ case PIC_9XX_XHCI_2_IRQ:
+ return 116;
+ case PIC_UART_0_IRQ:
+ return 133;
+ case PIC_UART_1_IRQ:
+ return 134;
+ case PIC_SATA_IRQ:
+ return 143;
+ case PIC_NAND_IRQ:
+ return 151;
+ case PIC_SPI_IRQ:
+ return 152;
+ case PIC_MMC_IRQ:
+ return 153;
+ case PIC_PCIE_LINK_LEGACY_IRQ(0):
+ case PIC_PCIE_LINK_LEGACY_IRQ(1):
+ case PIC_PCIE_LINK_LEGACY_IRQ(2):
+ case PIC_PCIE_LINK_LEGACY_IRQ(3):
+ return 191 + irq - PIC_PCIE_LINK_LEGACY_IRQ_BASE;
+ }
+ return -1;
+}
+
+static int xlp_irq_to_irt(int irq)
+{
+ uint64_t pcibase;
+ int devoff, irt;
+
+ devoff = 0;
+ switch (irq) {
+ case PIC_UART_0_IRQ:
+ devoff = XLP_IO_UART0_OFFSET(0);
+ break;
+ case PIC_UART_1_IRQ:
+ devoff = XLP_IO_UART1_OFFSET(0);
+ break;
+ case PIC_MMC_IRQ:
+ devoff = XLP_IO_MMC_OFFSET(0);
+ break;
+ case PIC_I2C_0_IRQ: /* I2C will be fixed up */
+ case PIC_I2C_1_IRQ:
+ case PIC_I2C_2_IRQ:
+ case PIC_I2C_3_IRQ:
+ if (cpu_is_xlpii())
+ devoff = XLP2XX_IO_I2C_OFFSET(0);
+ else
+ devoff = XLP_IO_I2C0_OFFSET(0);
+ break;
+ case PIC_SATA_IRQ:
+ devoff = XLP_IO_SATA_OFFSET(0);
+ break;
+ case PIC_GPIO_IRQ:
+ devoff = XLP_IO_GPIO_OFFSET(0);
+ break;
+ case PIC_NAND_IRQ:
+ devoff = XLP_IO_NAND_OFFSET(0);
+ break;
+ case PIC_SPI_IRQ:
+ devoff = XLP_IO_SPI_OFFSET(0);
+ break;
+ default:
+ if (cpu_is_xlpii()) {
+ switch (irq) {
+			/* XLP2XX has three XHCI USB controllers */
+ case PIC_2XX_XHCI_0_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI0_OFFSET(0);
+ break;
+ case PIC_2XX_XHCI_1_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI1_OFFSET(0);
+ break;
+ case PIC_2XX_XHCI_2_IRQ:
+ devoff = XLP2XX_IO_USB_XHCI2_OFFSET(0);
+ break;
+ }
+ } else {
+ switch (irq) {
+ case PIC_EHCI_0_IRQ:
+ devoff = XLP_IO_USB_EHCI0_OFFSET(0);
+ break;
+ case PIC_EHCI_1_IRQ:
+ devoff = XLP_IO_USB_EHCI1_OFFSET(0);
+ break;
+ case PIC_OHCI_0_IRQ:
+ devoff = XLP_IO_USB_OHCI0_OFFSET(0);
+ break;
+ case PIC_OHCI_1_IRQ:
+ devoff = XLP_IO_USB_OHCI1_OFFSET(0);
+ break;
+ case PIC_OHCI_2_IRQ:
+ devoff = XLP_IO_USB_OHCI2_OFFSET(0);
+ break;
+ case PIC_OHCI_3_IRQ:
+ devoff = XLP_IO_USB_OHCI3_OFFSET(0);
+ break;
+ }
+ }
+ }
+
+ if (devoff != 0) {
+ uint32_t val;
+
+ pcibase = nlm_pcicfg_base(devoff);
+ val = nlm_read_reg(pcibase, XLP_PCI_IRTINFO_REG);
+ if (val == 0xffffffff) {
+ irt = -1;
+ } else {
+ irt = val & 0xffff;
+ /* HW weirdness, I2C IRT entry has to be fixed up */
+ switch (irq) {
+ case PIC_I2C_1_IRQ:
+ irt = irt + 1; break;
+ case PIC_I2C_2_IRQ:
+ irt = irt + 2; break;
+ case PIC_I2C_3_IRQ:
+ irt = irt + 3; break;
+ }
+ }
+ } else if (irq >= PIC_PCIE_LINK_LEGACY_IRQ(0) &&
+ irq <= PIC_PCIE_LINK_LEGACY_IRQ(3)) {
+ /* HW bug, PCI IRT entries are bad on early silicon, fix */
+ irt = PIC_IRT_PCIE_LINK_INDEX(irq -
+ PIC_PCIE_LINK_LEGACY_IRQ_BASE);
+ } else {
+ irt = -1;
+ }
+ return irt;
+}
+
+int nlm_irq_to_irt(int irq)
+{
+ /* return -2 for irqs without 1-1 mapping */
+ if (irq >= PIC_PCIE_LINK_MSI_IRQ(0) && irq <= PIC_PCIE_LINK_MSI_IRQ(3))
+ return -2;
+ if (irq >= PIC_PCIE_MSIX_IRQ(0) && irq <= PIC_PCIE_MSIX_IRQ(3))
+ return -2;
+
+ if (cpu_is_xlp9xx())
+ return xlp9xx_irq_to_irt(irq);
+ else
+ return xlp_irq_to_irt(irq);
+}
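+
+/*
+ * An IRT index identifies an entry in the PIC's interrupt routing
+ * table. On XLP9XX the mapping is the fixed table above; on earlier
+ * XLP it is read from the device's PCI config space
+ * (XLP_PCI_IRTINFO_REG), with fixups for I2C and for the bad PCIe
+ * entries on early silicon.
+ */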
+
+static unsigned int nlm_xlp2_get_core_frequency(int node, int core)
+{
+ unsigned int pll_post_div, ctrl_val0, ctrl_val1, denom;
+ uint64_t num, sysbase, clockbase;
+
+ if (cpu_is_xlp9xx()) {
+ clockbase = nlm_get_clock_regbase(node);
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CPU_PLL_CTRL0(core));
+ ctrl_val1 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CPU_PLL_CTRL1(core));
+ } else {
+ sysbase = nlm_get_node(node)->sysbase;
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_CPU_PLL_CTRL0(core));
+ ctrl_val1 = nlm_read_sys_reg(sysbase,
+ SYS_CPU_PLL_CTRL1(core));
+ }
+
+ /* Find PLL post divider value */
+ switch ((ctrl_val0 >> 24) & 0x7) {
+ case 1:
+ pll_post_div = 2;
+ break;
+ case 3:
+ pll_post_div = 4;
+ break;
+ case 7:
+ pll_post_div = 8;
+ break;
+ case 6:
+ pll_post_div = 16;
+ break;
+ case 0:
+ default:
+ pll_post_div = 1;
+ break;
+ }
+
+ num = 1000000ULL * (400 * 3 + 100 * (ctrl_val1 & 0x3f));
+ denom = 3 * pll_post_div;
+ do_div(num, denom);
+
+ return (unsigned int)num;
+}
+
+static unsigned int nlm_xlp_get_core_frequency(int node, int core)
+{
+ unsigned int pll_divf, pll_divr, dfs_div, ext_div;
+ unsigned int rstval, dfsval, denom;
+ uint64_t num, sysbase;
+
+ sysbase = nlm_get_node(node)->sysbase;
+ rstval = nlm_read_sys_reg(sysbase, SYS_POWER_ON_RESET_CFG);
+ dfsval = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIV_VALUE);
+ pll_divf = ((rstval >> 10) & 0x7f) + 1;
+ pll_divr = ((rstval >> 8) & 0x3) + 1;
+ ext_div = ((rstval >> 30) & 0x3) + 1;
+ dfs_div = ((dfsval >> (core * 4)) & 0xf) + 1;
+
+ num = 800000000ULL * pll_divf;
+ denom = 3 * pll_divr * ext_div * dfs_div;
+ do_div(num, denom);
+
+ return (unsigned int)num;
+}
+
+unsigned int nlm_get_core_frequency(int node, int core)
+{
+ if (cpu_is_xlpii())
+ return nlm_xlp2_get_core_frequency(node, core);
+ else
+ return nlm_xlp_get_core_frequency(node, core);
+}
+
+/*
+ * Calculate PIC frequency from PLL registers.
+ * freq_out = (ref_freq/2 * (6 + ctrl2[7:0]) + ctrl2[20:8]/2^13) /
+ *		((2^ctrl0[7:5]) * Table(ctrl0[26:24]) * ref_div * 2^pic_div)
+ */
+static unsigned int nlm_xlp2_get_pic_frequency(int node)
+{
+ u32 ctrl_val0, ctrl_val2, vco_post_div, pll_post_div, cpu_xlp9xx;
+ u32 mdiv, fdiv, pll_out_freq_den, reg_select, ref_div, pic_div;
+ u64 sysbase, pll_out_freq_num, ref_clk_select, clockbase, ref_clk;
+
+ sysbase = nlm_get_node(node)->sysbase;
+ clockbase = nlm_get_clock_regbase(node);
+ cpu_xlp9xx = cpu_is_xlp9xx();
+
+ /* Find ref_clk_base */
+ if (cpu_xlp9xx)
+ ref_clk_select = (nlm_read_sys_reg(sysbase,
+ SYS_9XX_POWER_ON_RESET_CFG) >> 18) & 0x3;
+ else
+ ref_clk_select = (nlm_read_sys_reg(sysbase,
+ SYS_POWER_ON_RESET_CFG) >> 18) & 0x3;
+ switch (ref_clk_select) {
+ case 0:
+ ref_clk = 200000000ULL;
+ ref_div = 3;
+ break;
+ case 1:
+ ref_clk = 100000000ULL;
+ ref_div = 1;
+ break;
+ case 2:
+ ref_clk = 125000000ULL;
+ ref_div = 1;
+ break;
+ case 3:
+ ref_clk = 400000000ULL;
+ ref_div = 3;
+ break;
+ }
+
+ /* Find the clock source PLL device for PIC */
+ if (cpu_xlp9xx) {
+ reg_select = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CLK_DEV_SEL_REG) & 0x3;
+ switch (reg_select) {
+ case 0:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0);
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2);
+ break;
+ case 1:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(0));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(0));
+ break;
+ case 2:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(1));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(1));
+ break;
+ case 3:
+ ctrl_val0 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL0_DEVX(2));
+ ctrl_val2 = nlm_read_sys_reg(clockbase,
+ SYS_9XX_PLL_CTRL2_DEVX(2));
+ break;
+ }
+ } else {
+ reg_select = (nlm_read_sys_reg(sysbase,
+ SYS_CLK_DEV_SEL_REG) >> 22) & 0x3;
+ switch (reg_select) {
+ case 0:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0);
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2);
+ break;
+ case 1:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(0));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(0));
+ break;
+ case 2:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(1));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(1));
+ break;
+ case 3:
+ ctrl_val0 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL0_DEVX(2));
+ ctrl_val2 = nlm_read_sys_reg(sysbase,
+ SYS_PLL_CTRL2_DEVX(2));
+ break;
+ }
+ }
+
+ vco_post_div = (ctrl_val0 >> 5) & 0x7;
+ pll_post_div = (ctrl_val0 >> 24) & 0x7;
+ mdiv = ctrl_val2 & 0xff;
+ fdiv = (ctrl_val2 >> 8) & 0x1fff;
+
+ /* Find PLL post divider value */
+ switch (pll_post_div) {
+ case 1:
+ pll_post_div = 2;
+ break;
+ case 3:
+ pll_post_div = 4;
+ break;
+ case 7:
+ pll_post_div = 8;
+ break;
+ case 6:
+ pll_post_div = 16;
+ break;
+ case 0:
+ default:
+ pll_post_div = 1;
+ break;
+ }
+
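+	/* fdiv/2^13 is the sub-Hz part of the formula; integer division drops it */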
+ fdiv = fdiv/(1 << 13);
+ pll_out_freq_num = ((ref_clk >> 1) * (6 + mdiv)) + fdiv;
+ pll_out_freq_den = (1 << vco_post_div) * pll_post_div * ref_div;
+
+ if (pll_out_freq_den > 0)
+ do_div(pll_out_freq_num, pll_out_freq_den);
+
+ /* PIC post divider, which happens after PLL */
+ if (cpu_xlp9xx)
+ pic_div = nlm_read_sys_reg(clockbase,
+ SYS_9XX_CLK_DEV_DIV_REG) & 0x3;
+ else
+ pic_div = (nlm_read_sys_reg(sysbase,
+ SYS_CLK_DEV_DIV_REG) >> 22) & 0x3;
+ do_div(pll_out_freq_num, 1 << pic_div);
+
+ return pll_out_freq_num;
+}
+
+unsigned int nlm_get_pic_frequency(int node)
+{
+ if (cpu_is_xlpii())
+ return nlm_xlp2_get_pic_frequency(node);
+ else
+ return 133333333;
+}
+
+unsigned int nlm_get_cpu_frequency(void)
+{
+ return nlm_get_core_frequency(0, 0);
+}
+
+/*
+ * Fills up to 8 (base, limit) pairs of entries with the DRAM map of a
+ * node; if node < 0, gets the DRAM map of all nodes.
+ */
+int nlm_get_dram_map(int node, uint64_t *dram_map, int nentries)
+{
+ uint64_t bridgebase, base, lim;
+ uint32_t val;
+ unsigned int barreg, limreg, xlatreg;
+ int i, n, rv;
+
+ /* Look only at mapping on Node 0, we don't handle crazy configs */
+ bridgebase = nlm_get_bridge_regbase(0);
+ rv = 0;
+ for (i = 0; i < 8; i++) {
+ if (rv + 1 >= nentries)
+ break;
+ if (cpu_is_xlp9xx()) {
+ barreg = BRIDGE_9XX_DRAM_BAR(i);
+ limreg = BRIDGE_9XX_DRAM_LIMIT(i);
+ xlatreg = BRIDGE_9XX_DRAM_NODE_TRANSLN(i);
+ } else {
+ barreg = BRIDGE_DRAM_BAR(i);
+ limreg = BRIDGE_DRAM_LIMIT(i);
+ xlatreg = BRIDGE_DRAM_NODE_TRANSLN(i);
+ }
+ if (node >= 0) {
+ /* node specified, get node mapping of BAR */
+ val = nlm_read_bridge_reg(bridgebase, xlatreg);
+ n = (val >> 1) & 0x3;
+ if (n != node)
+ continue;
+ }
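+		/* bits 31:12 of BAR/LIMIT are address bits 39:20 (1 MiB units) */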
+ val = nlm_read_bridge_reg(bridgebase, barreg);
+ val = (val >> 12) & 0xfffff;
+ base = (uint64_t) val << 20;
+ val = nlm_read_bridge_reg(bridgebase, limreg);
+ val = (val >> 12) & 0xfffff;
+ if (val == 0) /* BAR not used */
+ continue;
+ lim = ((uint64_t)val + 1) << 20;
+ dram_map[rv] = base;
+ dram_map[rv + 1] = lim;
+ rv += 2;
+ }
+ return rv;
+}
diff --git a/arch/mips/netlogic/xlp/setup.c b/arch/mips/netlogic/xlp/setup.c
new file mode 100644
index 000000000..f743fd9da
--- /dev/null
+++ b/arch/mips/netlogic/xlp/setup.c
@@ -0,0 +1,179 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/of_fdt.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/bootinfo.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+uint64_t nlm_io_base;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+unsigned int nlm_threads_per_core;
+
+static void nlm_linux_exit(void)
+{
+ uint64_t sysbase = nlm_get_node(0)->sysbase;
+
+ if (cpu_is_xlp9xx())
+ nlm_write_sys_reg(sysbase, SYS_9XX_CHIP_RESET, 1);
+ else
+ nlm_write_sys_reg(sysbase, SYS_CHIP_RESET, 1);
+ for ( ; ; )
+ cpu_wait();
+}
+
+static void nlm_fixup_mem(void)
+{
+ const int pref_backup = 512;
+ int i;
+
+ for (i = 0; i < boot_mem_map.nr_map; i++) {
+ if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
+ continue;
+ boot_mem_map.map[i].size -= pref_backup;
+ }
+}
+
+static void __init xlp_init_mem_from_bars(void)
+{
+ uint64_t map[16];
+ int i, n;
+
+ n = nlm_get_dram_map(-1, map, ARRAY_SIZE(map)); /* -1 : all nodes */
+ for (i = 0; i < n; i += 2) {
+ /* exclude 0x1000_0000-0x2000_0000, u-boot device */
+ if (map[i] <= 0x10000000 && map[i+1] > 0x10000000)
+ map[i+1] = 0x10000000;
+ if (map[i] > 0x10000000 && map[i] < 0x20000000)
+ map[i] = 0x20000000;
+
+ add_memory_region(map[i], map[i+1] - map[i], BOOT_MEM_RAM);
+ }
+}
+
+void __init plat_mem_setup(void)
+{
+#ifdef CONFIG_SMP
+ nlm_wakeup_secondary_cpus();
+
+ /* update TLB size after waking up threads */
+ current_cpu_data.tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1;
+
+ register_smp_ops(&nlm_smp_ops);
+#endif
+ _machine_restart = (void (*)(char *))nlm_linux_exit;
+ _machine_halt = nlm_linux_exit;
+ pm_power_off = nlm_linux_exit;
+
+ /* memory and bootargs from DT */
+ xlp_early_init_devtree();
+
+ if (boot_mem_map.nr_map == 0) {
+ pr_info("Using DRAM BARs for memory map.\n");
+ xlp_init_mem_from_bars();
+ }
+	/* Reserve 'pref_backup' bytes at the end of each memory region */
+ nlm_fixup_mem();
+}
+
+const char *get_system_type(void)
+{
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ return "Broadcom XLPII Series";
+ default:
+ return "Netlogic XLP Series";
+ }
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing yet */
+}
+
+void xlp_mmu_init(void)
+{
+ u32 conf4;
+
+ if (cpu_is_xlpii()) {
+ /* XLPII series has extended pagesize in config 4 */
+ conf4 = read_c0_config4() & ~0x1f00u;
+ write_c0_config4(conf4 | ((PAGE_SHIFT - 10) / 2 << 8));
+ } else {
+ /* enable extended TLB and Large Fixed TLB */
+ write_c0_config6(read_c0_config6() | 0x24);
+
+ /* set page mask of extended Fixed TLB in config7 */
+ write_c0_config7(PM_DEFAULT_MASK >>
+ (13 + (ffz(PM_DEFAULT_MASK >> 13) / 2)));
+ }
+}
+
+void nlm_percpu_init(int hwcpuid)
+{
+}
+
+void __init prom_init(void)
+{
+ void *reset_vec;
+
+ nlm_io_base = CKSEG1ADDR(XLP_DEFAULT_IO_BASE);
+ nlm_init_boot_cpu();
+ xlp_mmu_init();
+ nlm_node_init(0);
+ xlp_dt_init((void *)(long)fw_arg0);
+
+ /* Update reset entry point with CPU init code */
+ reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
+ memset(reset_vec, 0, RESET_VEC_SIZE);
+ memcpy(reset_vec, (void *)nlm_reset_entry,
+ (nlm_reset_entry_end - nlm_reset_entry));
+
+#ifdef CONFIG_SMP
+ cpumask_setall(&nlm_cpumask);
+#endif
+}
diff --git a/arch/mips/netlogic/xlp/usb-init-xlp2.c b/arch/mips/netlogic/xlp/usb-init-xlp2.c
new file mode 100644
index 000000000..2524939a5
--- /dev/null
+++ b/arch/mips/netlogic/xlp/usb-init-xlp2.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2003-2013 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/platform_device.h>
+#include <linux/irq.h>
+
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+
+#define XLPII_USB3_CTL_0 0xc0
+#define XLPII_VAUXRST BIT(0)
+#define XLPII_VCCRST BIT(1)
+#define XLPII_NUM2PORT 9
+#define XLPII_NUM3PORT 13
+#define XLPII_RTUNEREQ BIT(20)
+#define XLPII_MS_CSYSREQ BIT(21)
+#define XLPII_XS_CSYSREQ BIT(22)
+#define XLPII_RETENABLEN BIT(23)
+#define XLPII_TX2RX BIT(24)
+#define XLPII_XHCIREV BIT(25)
+#define XLPII_ECCDIS BIT(26)
+
+#define XLPII_USB3_INT_REG 0xc2
+#define XLPII_USB3_INT_MASK 0xc3
+
+#define XLPII_USB_PHY_TEST 0xc6
+#define XLPII_PRESET BIT(0)
+#define XLPII_ATERESET BIT(1)
+#define XLPII_LOOPEN BIT(2)
+#define XLPII_TESTPDHSP BIT(3)
+#define XLPII_TESTPDSSP BIT(4)
+#define XLPII_TESTBURNIN BIT(5)
+
+#define XLPII_USB_PHY_LOS_LV 0xc9
+#define XLPII_LOSLEV 0
+#define XLPII_LOSBIAS 5
+#define XLPII_SQRXTX 8
+#define XLPII_TXBOOST 11
+#define XLPII_RSLKSEL 16
+#define XLPII_FSEL 20
+
+#define XLPII_USB_RFCLK_REG 0xcc
+#define XLPII_VVLD 30
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+
+#define nlm_xlpii_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(cpu_is_xlp9xx() ? \
+ XLP9XX_IO_USB_OFFSET(node, inst) : \
+ XLP2XX_IO_USB_OFFSET(node, inst))
+#define nlm_xlpii_get_usb_regbase(node, inst) \
+ (nlm_xlpii_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+static void xlp2xx_usb_ack(struct irq_data *data)
+{
+ u64 port_addr;
+
+ switch (data->irq) {
+ case PIC_2XX_XHCI_0_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 1);
+ break;
+ case PIC_2XX_XHCI_1_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 2);
+ break;
+ case PIC_2XX_XHCI_2_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(0, 3);
+ break;
+ default:
+ pr_err("No matching USB irq!\n");
+ return;
+ }
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+}
+
+static void xlp9xx_usb_ack(struct irq_data *data)
+{
+ u64 port_addr;
+ int node, irq;
+
+ /* Find the node and irq on the node */
+ irq = data->irq % NLM_IRQS_PER_NODE;
+ node = data->irq / NLM_IRQS_PER_NODE;
+
+ switch (irq) {
+ case PIC_9XX_XHCI_0_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 1);
+ break;
+ case PIC_9XX_XHCI_1_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 2);
+ break;
+ case PIC_9XX_XHCI_2_IRQ:
+ port_addr = nlm_xlpii_get_usb_regbase(node, 3);
+ break;
+ default:
+ pr_err("No matching USB irq %d node %d!\n", irq, node);
+ return;
+ }
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+}
+
+static void nlm_xlpii_usb_hw_reset(int node, int port)
+{
+ u64 port_addr, xhci_base, pci_base;
+ void __iomem *corebase;
+ u32 val;
+
+ port_addr = nlm_xlpii_get_usb_regbase(node, port);
+
+ /* Set frequency */
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV);
+ val &= ~(0x3f << XLPII_FSEL);
+ val |= (0x27 << XLPII_FSEL);
+ nlm_write_usb_reg(port_addr, XLPII_USB_PHY_LOS_LV, val);
+
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_RFCLK_REG);
+ val |= (1 << XLPII_VVLD);
+ nlm_write_usb_reg(port_addr, XLPII_USB_RFCLK_REG, val);
+
+ /* PHY reset */
+ val = nlm_read_usb_reg(port_addr, XLPII_USB_PHY_TEST);
+ val &= (XLPII_ATERESET | XLPII_LOOPEN | XLPII_TESTPDHSP
+ | XLPII_TESTPDSSP | XLPII_TESTBURNIN);
+ nlm_write_usb_reg(port_addr, XLPII_USB_PHY_TEST, val);
+
+ /* Setup control register */
+ val = XLPII_VAUXRST | XLPII_VCCRST | (1 << XLPII_NUM2PORT)
+ | (1 << XLPII_NUM3PORT) | XLPII_MS_CSYSREQ | XLPII_XS_CSYSREQ
+ | XLPII_RETENABLEN | XLPII_XHCIREV;
+ nlm_write_usb_reg(port_addr, XLPII_USB3_CTL_0, val);
+
+ /* Enable interrupts */
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_MASK, 0x00000001);
+
+ /* Clear all interrupts */
+ nlm_write_usb_reg(port_addr, XLPII_USB3_INT_REG, 0xffffffff);
+
+ udelay(2000);
+
+ /* XHCI configuration at PCI mem */
+ pci_base = nlm_xlpii_get_usb_pcibase(node, port);
+ xhci_base = nlm_read_usb_reg(pci_base, 0x4) & ~0xf;
+ corebase = ioremap(xhci_base, 0x10000);
+ if (!corebase)
+ return;
+
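+	/* 0xc1xx/0xc2xx offsets below match Synopsys DWC3 global registers */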
+ writel(0x240002, corebase + 0xc2c0);
+ /* GCTL 0xc110 */
+ val = readl(corebase + 0xc110);
+ val &= ~(0x3 << 12);
+ val |= (1 << 12);
+ writel(val, corebase + 0xc110);
+ udelay(100);
+
+ /* PHYCFG 0xc200 */
+ val = readl(corebase + 0xc200);
+ val &= ~(1 << 6);
+ writel(val, corebase + 0xc200);
+ udelay(100);
+
+ /* PIPECTL 0xc2c0 */
+ val = readl(corebase + 0xc2c0);
+ val &= ~(1 << 17);
+ writel(val, corebase + 0xc2c0);
+
+ iounmap(corebase);
+}
+
+static int __init nlm_platform_xlpii_usb_init(void)
+{
+ int node;
+
+ if (!cpu_is_xlpii())
+ return 0;
+
+ if (!cpu_is_xlp9xx()) {
+ /* XLP 2XX single node */
+ pr_info("Initializing 2XX USB Interface\n");
+ nlm_xlpii_usb_hw_reset(0, 1);
+ nlm_xlpii_usb_hw_reset(0, 2);
+ nlm_xlpii_usb_hw_reset(0, 3);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_0_IRQ, xlp2xx_usb_ack);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_1_IRQ, xlp2xx_usb_ack);
+ nlm_set_pic_extra_ack(0, PIC_2XX_XHCI_2_IRQ, xlp2xx_usb_ack);
+ return 0;
+ }
+
+ /* XLP 9XX, multi-node */
+ pr_info("Initializing 9XX/5XX USB Interface\n");
+ for (node = 0; node < NLM_NR_NODES; node++) {
+ if (!nlm_node_present(node))
+ continue;
+ nlm_xlpii_usb_hw_reset(node, 1);
+ nlm_xlpii_usb_hw_reset(node, 2);
+ nlm_xlpii_usb_hw_reset(node, 3);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_0_IRQ, xlp9xx_usb_ack);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_1_IRQ, xlp9xx_usb_ack);
+ nlm_set_pic_extra_ack(node, PIC_9XX_XHCI_2_IRQ, xlp9xx_usb_ack);
+ }
+ return 0;
+}
+
+arch_initcall(nlm_platform_xlpii_usb_init);
+
+static u64 xlp_usb_dmamask = ~(u32)0;
+
+/* Fix up the IRQ for USB devices on the XLP9XX SoC PCIe bus */
+static void nlm_xlp9xx_usb_fixup_final(struct pci_dev *dev)
+{
+ int node;
+
+ node = xlp_socdev_to_node(dev);
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ switch (dev->devfn) {
+ case 0x21:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_0_IRQ);
+ break;
+ case 0x22:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_1_IRQ);
+ break;
+ case 0x23:
+ dev->irq = nlm_irq_to_xirq(node, PIC_9XX_XHCI_2_IRQ);
+ break;
+ }
+}
+
+/* Fix up the IRQ for USB devices on the XLP2XX SoC PCIe bus */
+static void nlm_xlp2xx_usb_fixup_final(struct pci_dev *dev)
+{
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ switch (dev->devfn) {
+ case 0x21:
+ dev->irq = PIC_2XX_XHCI_0_IRQ;
+ break;
+ case 0x22:
+ dev->irq = PIC_2XX_XHCI_1_IRQ;
+ break;
+ case 0x23:
+ dev->irq = PIC_2XX_XHCI_2_IRQ;
+ break;
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_XLP9XX_XHCI,
+ nlm_xlp9xx_usb_fixup_final);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_XHCI,
+ nlm_xlp2xx_usb_fixup_final);
diff --git a/arch/mips/netlogic/xlp/usb-init.c b/arch/mips/netlogic/xlp/usb-init.c
new file mode 100644
index 000000000..f8117985f
--- /dev/null
+++ b/arch/mips/netlogic/xlp/usb-init.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+
+/*
+ * USB glue logic registers, used only during initialization
+ */
+#define USB_CTL_0 0x01
+#define USB_PHY_0 0x0A
+#define USB_PHY_RESET 0x01
+#define USB_PHY_PORT_RESET_0 0x10
+#define USB_PHY_PORT_RESET_1 0x20
+#define USB_CONTROLLER_RESET 0x01
+#define USB_INT_STATUS 0x0E
+#define USB_INT_EN 0x0F
+#define USB_PHY_INTERRUPT_EN 0x01
+#define USB_OHCI_INTERRUPT_EN 0x02
+#define USB_OHCI_INTERRUPT1_EN 0x04
+#define USB_OHCI_INTERRUPT2_EN 0x08
+#define USB_CTRL_INTERRUPT_EN 0x10
+
+#define nlm_read_usb_reg(b, r) nlm_read_reg(b, r)
+#define nlm_write_usb_reg(b, r, v) nlm_write_reg(b, r, v)
+#define nlm_get_usb_pcibase(node, inst) \
+ nlm_pcicfg_base(XLP_IO_USB_OFFSET(node, inst))
+#define nlm_get_usb_regbase(node, inst) \
+ (nlm_get_usb_pcibase(node, inst) + XLP_IO_PCI_HDRSZ)
+
+static void nlm_usb_intr_en(int node, int port)
+{
+ uint32_t val;
+ uint64_t port_addr;
+
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_INT_EN);
+ val = USB_CTRL_INTERRUPT_EN | USB_OHCI_INTERRUPT_EN |
+ USB_OHCI_INTERRUPT1_EN | USB_OHCI_INTERRUPT2_EN;
+ nlm_write_usb_reg(port_addr, USB_INT_EN, val);
+}
+
+static void nlm_usb_hw_reset(int node, int port)
+{
+ uint64_t port_addr;
+ uint32_t val;
+
+ /* reset USB phy */
+ port_addr = nlm_get_usb_regbase(node, port);
+ val = nlm_read_usb_reg(port_addr, USB_PHY_0);
+ val &= ~(USB_PHY_RESET | USB_PHY_PORT_RESET_0 | USB_PHY_PORT_RESET_1);
+ nlm_write_usb_reg(port_addr, USB_PHY_0, val);
+
+ mdelay(100);
+ val = nlm_read_usb_reg(port_addr, USB_CTL_0);
+ val &= ~(USB_CONTROLLER_RESET);
+ val |= 0x4;
+ nlm_write_usb_reg(port_addr, USB_CTL_0, val);
+}
+
+static int __init nlm_platform_usb_init(void)
+{
+ if (cpu_is_xlpii())
+ return 0;
+
+ pr_info("Initializing USB Interface\n");
+ nlm_usb_hw_reset(0, 0);
+ nlm_usb_hw_reset(0, 3);
+
+ /* Enable PHY interrupts */
+ nlm_usb_intr_en(0, 0);
+ nlm_usb_intr_en(0, 3);
+
+ return 0;
+}
+
+arch_initcall(nlm_platform_usb_init);
+
+static u64 xlp_usb_dmamask = ~(u32)0;
+
+/* Fix up the IRQ for USB devices on the XLP SoC PCIe bus */
+static void nlm_usb_fixup_final(struct pci_dev *dev)
+{
+ dev->dev.dma_mask = &xlp_usb_dmamask;
+ dev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ switch (dev->devfn) {
+ case 0x10:
+ dev->irq = PIC_EHCI_0_IRQ;
+ break;
+ case 0x11:
+ dev->irq = PIC_OHCI_0_IRQ;
+ break;
+ case 0x12:
+ dev->irq = PIC_OHCI_1_IRQ;
+ break;
+ case 0x13:
+ dev->irq = PIC_EHCI_1_IRQ;
+ break;
+ case 0x14:
+ dev->irq = PIC_OHCI_2_IRQ;
+ break;
+ case 0x15:
+ dev->irq = PIC_OHCI_3_IRQ;
+ break;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_EHCI,
+ nlm_usb_fixup_final);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_OHCI,
+ nlm_usb_fixup_final);
diff --git a/arch/mips/netlogic/xlp/wakeup.c b/arch/mips/netlogic/xlp/wakeup.c
new file mode 100644
index 000000000..d61004dd7
--- /dev/null
+++ b/arch/mips/netlogic/xlp/wakeup.c
@@ -0,0 +1,212 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/string.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+
+static int xlp_wakeup_core(uint64_t sysbase, int node, int core)
+{
+ uint32_t coremask, value;
+ int count, resetreg;
+
+ coremask = (1 << core);
+
+ /* Enable CPU clock in case of 8xx/3xx */
+ if (!cpu_is_xlpii()) {
+ value = nlm_read_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_CORE_DFS_DIS_CTRL, value);
+ }
+
+ /* On 9XX, mark coherent first */
+ if (cpu_is_xlp9xx()) {
+ value = nlm_read_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, SYS_9XX_CPU_NONCOHERENT_MODE, value);
+ }
+
+ /* Remove CPU Reset */
+ resetreg = cpu_is_xlp9xx() ? SYS_9XX_CPU_RESET : SYS_CPU_RESET;
+ value = nlm_read_sys_reg(sysbase, resetreg);
+ value &= ~coremask;
+ nlm_write_sys_reg(sysbase, resetreg, value);
+
+ /* We are done on 9XX */
+ if (cpu_is_xlp9xx())
+ return 1;
+
+ /* Poll for CPU to mark itself coherent on other type of XLP */
+ count = 100000;
+ do {
+ value = nlm_read_sys_reg(sysbase, SYS_CPU_NONCOHERENT_MODE);
+ } while ((value & coremask) != 0 && --count > 0);
+
+ return count != 0;
+}
+
+static int wait_for_cpus(int cpu, int bootcpu)
+{
+ volatile uint32_t *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+ int i, count, notready;
+
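+	/* spin until all hw threads of the core set their boot-ready flag */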
+ count = 0x800000;
+ do {
+ notready = nlm_threads_per_core;
+ for (i = 0; i < nlm_threads_per_core; i++)
+ if (cpu_ready[cpu + i] || (cpu + i) == bootcpu)
+ --notready;
+ } while (notready != 0 && --count > 0);
+
+ return count != 0;
+}
+
+static void xlp_enable_secondary_cores(const cpumask_t *wakeup_mask)
+{
+ struct nlm_soc_info *nodep;
+ uint64_t syspcibase, fusebase;
+ uint32_t syscoremask, mask, fusemask;
+ int core, n, cpu, ncores;
+
+ for (n = 0; n < NLM_NR_NODES; n++) {
+ if (n != 0) {
+ /* check if node exists and is online */
+ if (cpu_is_xlp9xx()) {
+ int b = xlp9xx_get_socbus(n);
+ pr_info("Node %d SoC PCI bus %d.\n", n, b);
+ if (b == 0)
+ break;
+ } else {
+ syspcibase = nlm_get_sys_pcibase(n);
+ if (nlm_read_reg(syspcibase, 0) == 0xffffffff)
+ break;
+ }
+ nlm_node_init(n);
+ }
+
+ /* read cores in reset from SYS */
+ nodep = nlm_get_node(n);
+
+ if (cpu_is_xlp9xx()) {
+ fusebase = nlm_get_fuse_regbase(n);
+ fusemask = nlm_read_reg(fusebase, FUSE_9XX_DEVCFG6);
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP5XX:
+ mask = 0xff;
+ break;
+ case PRID_IMP_NETLOGIC_XLP9XX:
+ default:
+ mask = 0xfffff;
+ break;
+ }
+ } else {
+ fusemask = nlm_read_sys_reg(nodep->sysbase,
+ SYS_EFUSE_DEVICE_CFG_STATUS0);
+ switch (read_c0_prid() & PRID_IMP_MASK) {
+ case PRID_IMP_NETLOGIC_XLP3XX:
+ mask = 0xf;
+ break;
+ case PRID_IMP_NETLOGIC_XLP2XX:
+ mask = 0x3;
+ break;
+ case PRID_IMP_NETLOGIC_XLP8XX:
+ default:
+ mask = 0xff;
+ break;
+ }
+ }
+
+ /*
+ * Fused out cores are set in the fusemask, and the remaining
+ * cores are renumbered to range 0 .. nactive-1
+ */
+ syscoremask = (1 << hweight32(~fusemask & mask)) - 1;
+
+ pr_info("Node %d - SYS/FUSE coremask %x\n", n, syscoremask);
+ ncores = nlm_cores_per_node();
+ for (core = 0; core < ncores; core++) {
+ /* we will be on node 0 core 0 */
+ if (n == 0 && core == 0)
+ continue;
+
+ /* see if the core exists */
+ if ((syscoremask & (1 << core)) == 0)
+ continue;
+
+ /* see if at least the first hw thread is enabled */
+ cpu = (n * ncores + core) * NLM_THREADS_PER_CORE;
+ if (!cpumask_test_cpu(cpu, wakeup_mask))
+ continue;
+
+ /* wake up the core */
+ if (!xlp_wakeup_core(nodep->sysbase, n, core))
+ continue;
+
+ /* core is up */
+ nodep->coremask |= 1u << core;
+
+ /* spin until the hw threads sets their ready */
+ if (!wait_for_cpus(cpu, 0))
+ pr_err("Node %d : timeout core %d\n", n, core);
+ }
+ }
+}
+
+void xlp_wakeup_secondary_cpus(void)
+{
+ /*
+ * In case of u-boot, the secondaries are in reset
+ * first wakeup core 0 threads
+ */
+ xlp_boot_core0_siblings();
+ if (!wait_for_cpus(0, 0))
+ pr_err("Node 0 : timeout core 0\n");
+
+ /* now get other cores out of reset */
+ xlp_enable_secondary_cores(&nlm_cpumask);
+}
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile
new file mode 100644
index 000000000..05902bc6f
--- /dev/null
+++ b/arch/mips/netlogic/xlr/Makefile
@@ -0,0 +1,2 @@
+obj-y += fmn.o fmn-config.o setup.o platform.o platform-flash.o
+obj-$(CONFIG_SMP) += wakeup.o
diff --git a/arch/mips/netlogic/xlr/fmn-config.c b/arch/mips/netlogic/xlr/fmn-config.c
new file mode 100644
index 000000000..c7622c6e5
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn-config.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <asm/cpu-info.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/mipsregs.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
+
+struct xlr_board_fmn_config xlr_board_fmn_config;
+
+static void __maybe_unused print_credit_config(struct xlr_fmn_info *fmn_info)
+{
+ int bkt;
+
+ pr_info("Bucket size :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 0],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 1],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 2],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 3],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 4],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 5],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 6],
+ xlr_board_fmn_config.bucket_size[(bkt * 8) + 7]);
+ pr_info("\n");
+
+ pr_info("Credits distribution :\n");
+ pr_info("Station\t: Size\n");
+ for (bkt = 0; bkt < 16; bkt++)
+ pr_info(" %d %d %d %d %d %d %d %d\n",
+ fmn_info->credit_config[(bkt * 8) + 0],
+ fmn_info->credit_config[(bkt * 8) + 1],
+ fmn_info->credit_config[(bkt * 8) + 2],
+ fmn_info->credit_config[(bkt * 8) + 3],
+ fmn_info->credit_config[(bkt * 8) + 4],
+ fmn_info->credit_config[(bkt * 8) + 5],
+ fmn_info->credit_config[(bkt * 8) + 6],
+ fmn_info->credit_config[(bkt * 8) + 7]);
+ pr_info("\n");
+}
+
+static void check_credit_distribution(void)
+{
+ struct xlr_board_fmn_config *cfg = &xlr_board_fmn_config;
+ int bkt, n, total_credits, ncores;
+
+ ncores = hweight32(nlm_current_node()->coremask);
+ for (bkt = 0; bkt < 128; bkt++) {
+ total_credits = 0;
+ for (n = 0; n < ncores; n++)
+ total_credits += cfg->cpu[n].credit_config[bkt];
+ total_credits += cfg->gmac[0].credit_config[bkt];
+ total_credits += cfg->gmac[1].credit_config[bkt];
+ total_credits += cfg->dma.credit_config[bkt];
+ total_credits += cfg->cmp.credit_config[bkt];
+ total_credits += cfg->sae.credit_config[bkt];
+ total_credits += cfg->xgmac[0].credit_config[bkt];
+ total_credits += cfg->xgmac[1].credit_config[bkt];
+ if (total_credits > cfg->bucket_size[bkt])
+ pr_err("ERROR: Bucket %d: credits (%d) > size (%d)\n",
+ bkt, total_credits, cfg->bucket_size[bkt]);
+ }
+ pr_info("Credit distribution complete.\n");
+}
+
+/**
+ * Configure bucket size and credits for a device. 'size' is the size of
+ * the device's buckets; it is distributed among all the CPUs so that
+ * each of them can send messages to the device.
+ *
+ * The device is also given 'cpu_credits' on each CPU bucket, to send
+ * messages to the CPUs.
+ *
+ * @dev_info: FMN information structure of the device
+ * @start_stn_id: Starting station id of dev_info
+ * @end_stn_id: End station id of dev_info
+ * @num_buckets: Total number of buckets of dev_info
+ * @cpu_credits: Credits the device gets on each CPU bucket
+ * @size: Size of each bucket in the device station
+ */
+static void setup_fmn_cc(struct xlr_fmn_info *dev_info, int start_stn_id,
+ int end_stn_id, int num_buckets, int cpu_credits, int size)
+{
+ int i, j, num_core, n, credits_per_cpu;
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ dev_info->num_buckets = num_buckets;
+ dev_info->start_stn_id = start_stn_id;
+ dev_info->end_stn_id = end_stn_id;
+
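+	/*
+	 * With 3 active cores, divide as if there were 4; the remainder is
+	 * handed back out in chunks of 4 in the loop below.
+	 */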
+ n = num_core;
+ if (num_core == 3)
+ n = 4;
+
+ for (i = start_stn_id; i <= end_stn_id; i++) {
+ xlr_board_fmn_config.bucket_size[i] = size;
+
+ /* Dividing device credits equally to cpus */
+ credits_per_cpu = size / n;
+ for (j = 0; j < num_core; j++)
+ cpu[j].credit_config[i] = credits_per_cpu;
+
+ /* credits left to distribute */
+ credits_per_cpu = size - (credits_per_cpu * num_core);
+
+ /* distribute the remaining credits (if any), among cores */
+ for (j = 0; (j < num_core) && (credits_per_cpu >= 4); j++) {
+ cpu[j].credit_config[i] += 4;
+ credits_per_cpu -= 4;
+ }
+ }
+
+ /* Distributing cpu per bucket credits to devices */
+ for (i = 0; i < num_core; i++) {
+ for (j = 0; j < FMN_CORE_NBUCKETS; j++)
+ dev_info->credit_config[(i * 8) + j] = cpu_credits;
+ }
+}
+
+/*
+ * Each core has 256 slots and 8 buckets,
+ * Configure the 8 buckets each with 32 slots
+ */
+static void setup_cpu_fmninfo(struct xlr_fmn_info *cpu, int num_core)
+{
+ int i, j;
+
+ for (i = 0; i < num_core; i++) {
+ cpu[i].start_stn_id = (8 * i);
+ cpu[i].end_stn_id = (8 * i + 8);
+
+ for (j = cpu[i].start_stn_id; j < cpu[i].end_stn_id; j++)
+ xlr_board_fmn_config.bucket_size[j] = 32;
+ }
+}
+
+/**
+ * Set up the FMN details for each device, according to the devices
+ * available in this variant of the XLR/XLS processor
+ */
+void xlr_board_info_setup(void)
+{
+ struct xlr_fmn_info *cpu = xlr_board_fmn_config.cpu;
+ struct xlr_fmn_info *gmac = xlr_board_fmn_config.gmac;
+ struct xlr_fmn_info *xgmac = xlr_board_fmn_config.xgmac;
+ struct xlr_fmn_info *dma = &xlr_board_fmn_config.dma;
+ struct xlr_fmn_info *cmp = &xlr_board_fmn_config.cmp;
+ struct xlr_fmn_info *sae = &xlr_board_fmn_config.sae;
+ int processor_id, num_core;
+
+ num_core = hweight32(nlm_current_node()->coremask);
+ processor_id = read_c0_prid() & PRID_IMP_MASK;
+
+ setup_cpu_fmninfo(cpu, num_core);
+ switch (processor_id) {
+ case PRID_IMP_NETLOGIC_XLS104:
+ case PRID_IMP_NETLOGIC_XLS108:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS204:
+ case PRID_IMP_NETLOGIC_XLS208:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS404:
+ case PRID_IMP_NETLOGIC_XLS408:
+ case PRID_IMP_NETLOGIC_XLS404B:
+ case PRID_IMP_NETLOGIC_XLS408B:
+ case PRID_IMP_NETLOGIC_XLS416B:
+ case PRID_IMP_NETLOGIC_XLS608B:
+ case PRID_IMP_NETLOGIC_XLS616B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLS412B:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 8, 32);
+ setup_fmn_cc(&gmac[1], FMN_STNID_GMAC1_FR_0,
+ FMN_STNID_GMAC1_TX3, 8, 8, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(cmp, FMN_STNID_CMP_0,
+ FMN_STNID_CMP_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 8, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR308:
+ case PRID_IMP_NETLOGIC_XLR308C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR532:
+ case PRID_IMP_NETLOGIC_XLR532C:
+ case PRID_IMP_NETLOGIC_XLR516C:
+ case PRID_IMP_NETLOGIC_XLR508C:
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 16, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 8, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+
+ case PRID_IMP_NETLOGIC_XLR732:
+ case PRID_IMP_NETLOGIC_XLR716:
+ setup_fmn_cc(&xgmac[0], FMN_STNID_XMAC0_00_TX,
+ FMN_STNID_XMAC0_15_TX, 8, 0, 32);
+ setup_fmn_cc(&xgmac[1], FMN_STNID_XMAC1_00_TX,
+ FMN_STNID_XMAC1_15_TX, 8, 0, 32);
+ setup_fmn_cc(&gmac[0], FMN_STNID_GMAC0,
+ FMN_STNID_GMAC0_TX3, 8, 24, 32);
+ setup_fmn_cc(dma, FMN_STNID_DMA_0,
+ FMN_STNID_DMA_3, 4, 4, 64);
+ setup_fmn_cc(sae, FMN_STNID_SEC0,
+ FMN_STNID_SEC1, 2, 4, 128);
+ break;
+ default:
+ pr_err("Unknown CPU with processor ID [%d]\n", processor_id);
+ pr_err("Error: Cannot initialize FMN credits.\n");
+ }
+
+ check_credit_distribution();
+
+#if 0 /* debug */
+ print_credit_config(&cpu[0]);
+ print_credit_config(&gmac[0]);
+#endif
+}
diff --git a/arch/mips/netlogic/xlr/fmn.c b/arch/mips/netlogic/xlr/fmn.c
new file mode 100644
index 000000000..d428e8471
--- /dev/null
+++ b/arch/mips/netlogic/xlr/fmn.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright (c) 2003-2012 Broadcom Corporation
+ * All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqreturn.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/mipsregs.h>
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/xlr/fmn.h>
+#include <asm/netlogic/common.h>
+
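+/*
+ * Write the 8 credit counters for destination 'dest'; 'dest' must be a
+ * compile-time constant, since it is pasted into the register accessor name.
+ */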
+#define COP2_CC_INIT_CPU_DEST(dest, conf) \
+do { \
+ nlm_write_c2_cc##dest(0, conf[(dest * 8) + 0]); \
+ nlm_write_c2_cc##dest(1, conf[(dest * 8) + 1]); \
+ nlm_write_c2_cc##dest(2, conf[(dest * 8) + 2]); \
+ nlm_write_c2_cc##dest(3, conf[(dest * 8) + 3]); \
+ nlm_write_c2_cc##dest(4, conf[(dest * 8) + 4]); \
+ nlm_write_c2_cc##dest(5, conf[(dest * 8) + 5]); \
+ nlm_write_c2_cc##dest(6, conf[(dest * 8) + 6]); \
+ nlm_write_c2_cc##dest(7, conf[(dest * 8) + 7]); \
+} while (0)
+
+struct fmn_message_handler {
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *);
+ void *arg;
+} msg_handlers[128];
+
+/*
+ * FMN interrupt handler. We configure the FMN so that any messages in
+ * any of the CPU buckets will trigger an interrupt on the CPU.
+ * The message can be from any device on the FMN (like NAE/SAE/DMA).
+ * The source station id is used to figure out which of the registered
+ * handlers have to be called.
+ */
+static irqreturn_t fmn_message_handler(int irq, void *data)
+{
+ struct fmn_message_handler *hndlr;
+ int bucket, rv;
+ int size = 0, code = 0, src_stnid = 0;
+ struct nlm_fmn_msg msg;
+ uint32_t mflags, bkt_status;
+
+ mflags = nlm_cop2_enable_irqsave();
+ /* Disable message ring interrupt */
+ nlm_fmn_setup_intr(irq, 0);
+ while (1) {
+		/* 8 buckets per core; status bits [31:24] have one bit per
+		 * bucket, and a set bit means the bucket is empty */
+ bkt_status = (nlm_read_c2_status0() >> 24) & 0xff;
+ if (bkt_status == 0xff)
+ break;
+ for (bucket = 0; bucket < 8; bucket++) {
+ /* Continue on empty bucket */
+ if (bkt_status & (1 << bucket))
+ continue;
+ rv = nlm_fmn_receive(bucket, &size, &code, &src_stnid,
+ &msg);
+ if (rv != 0)
+ continue;
+
+ hndlr = &msg_handlers[src_stnid];
+ if (hndlr->action == NULL)
+ pr_warn("No msgring handler for stnid %d\n",
+ src_stnid);
+ else {
+ nlm_cop2_disable_irqrestore(mflags);
+ hndlr->action(bucket, src_stnid, size, code,
+ &msg, hndlr->arg);
+ mflags = nlm_cop2_enable_irqsave();
+ }
+ }
+	}
+ /* Enable message ring intr, to any thread in core */
+ nlm_fmn_setup_intr(irq, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(mflags);
+ return IRQ_HANDLED;
+}
+
+struct irqaction fmn_irqaction = {
+ .handler = fmn_message_handler,
+ .flags = IRQF_PERCPU,
+ .name = "fmn",
+};
+
+void xlr_percpu_fmn_init(void)
+{
+ struct xlr_fmn_info *cpu_fmn_info;
+ int *bucket_sizes;
+ uint32_t flags;
+ int id;
+
+ BUG_ON(nlm_thread_id() != 0);
+ id = nlm_core_id();
+
+ bucket_sizes = xlr_board_fmn_config.bucket_size;
+ cpu_fmn_info = &xlr_board_fmn_config.cpu[id];
+ flags = nlm_cop2_enable_irqsave();
+
+ /* Setup bucket sizes for the core. */
+ nlm_write_c2_bucksize(0, bucket_sizes[id * 8 + 0]);
+ nlm_write_c2_bucksize(1, bucket_sizes[id * 8 + 1]);
+ nlm_write_c2_bucksize(2, bucket_sizes[id * 8 + 2]);
+ nlm_write_c2_bucksize(3, bucket_sizes[id * 8 + 3]);
+ nlm_write_c2_bucksize(4, bucket_sizes[id * 8 + 4]);
+ nlm_write_c2_bucksize(5, bucket_sizes[id * 8 + 5]);
+ nlm_write_c2_bucksize(6, bucket_sizes[id * 8 + 6]);
+ nlm_write_c2_bucksize(7, bucket_sizes[id * 8 + 7]);
+
+ /*
+ * For sending FMN messages, we need credits on the destination
+ * bucket. Program the credits this core has on the 128 possible
+ * destination buckets.
+	 * We cannot use a loop here, because the first argument has
+ * to be a constant integer value.
+ */
+ COP2_CC_INIT_CPU_DEST(0, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(1, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(2, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(3, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(4, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(5, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(6, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(7, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(8, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(9, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(10, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(11, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(12, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(13, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(14, cpu_fmn_info->credit_config);
+ COP2_CC_INIT_CPU_DEST(15, cpu_fmn_info->credit_config);
+
+ /* enable FMN interrupts on this CPU */
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(flags);
+}
+
+
+/*
+ * Register an FMN message handler for a range of source station ids
+ * @start_stnid, @end_stnid: range of source station ids to handle
+ * @action: handler function pointer
+ * @arg: argument passed back to the handler
+ */
+int nlm_register_fmn_handler(int start_stnid, int end_stnid,
+ void (*action)(int, int, int, int, struct nlm_fmn_msg *, void *),
+ void *arg)
+{
+ int sstnid;
+
+ for (sstnid = start_stnid; sstnid <= end_stnid; sstnid++) {
+ msg_handlers[sstnid].arg = arg;
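+		/* publish arg before action; the irq handler checks action */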
+ smp_wmb();
+ msg_handlers[sstnid].action = action;
+ }
+ pr_debug("Registered FMN msg handler for stnid %d-%d\n",
+ start_stnid, end_stnid);
+ return 0;
+}
+
+void nlm_setup_fmn_irq(void)
+{
+ uint32_t flags;
+
+ /* setup irq only once */
+ setup_irq(IRQ_FMN, &fmn_irqaction);
+
+ flags = nlm_cop2_enable_irqsave();
+ nlm_fmn_setup_intr(IRQ_FMN, (1 << nlm_threads_per_core) - 1);
+ nlm_cop2_disable_irqrestore(flags);
+}
diff --git a/arch/mips/netlogic/xlr/platform-flash.c b/arch/mips/netlogic/xlr/platform-flash.c
new file mode 100644
index 000000000..4d1b4c003
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform-flash.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright 2011, Netlogic Microsystems.
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/resource.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/physmap.h>
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/flash.h>
+#include <asm/netlogic/xlr/bridge.h>
+#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+/*
+ * Default NOR partition layout
+ */
+static struct mtd_partition xlr_nor_parts[] = {
+ {
+ .name = "User FS",
+ .offset = 0x800000,
+ .size = MTDPART_SIZ_FULL,
+ }
+};
+
+/*
+ * Default NAND partition layout
+ */
+static struct mtd_partition xlr_nand_parts[] = {
+ {
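+		/* skips the first 64 blocks of 64 x 2 KiB pages (8 MiB) */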
+ .name = "Root Filesystem",
+ .offset = 64 * 64 * 2048,
+ .size = 432 * 64 * 2048,
+ },
+ {
+ .name = "Home Filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+/* Use PHYSMAP flash for NOR */
+struct physmap_flash_data xlr_nor_data = {
+ .width = 2,
+ .parts = xlr_nor_parts,
+ .nr_parts = ARRAY_SIZE(xlr_nor_parts),
+};
+
+static struct resource xlr_nor_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nor_dev = {
+ .name = "physmap-flash",
+ .dev = {
+ .platform_data = &xlr_nor_data,
+ },
+ .num_resources = ARRAY_SIZE(xlr_nor_res),
+ .resource = xlr_nor_res,
+};
+
+/*
+ * Use "gen_nand" driver for NAND flash
+ *
+ * There seems to be no way to store a private pointer containing
+ * platform-specific info in the gen_nand driver. We will use a global
+ * struct for now, since we currently have only one NAND chip per board.
+ */
+struct xlr_nand_flash_priv {
+ int cs;
+ uint64_t flash_mmio;
+};
+
+static struct xlr_nand_flash_priv nand_priv;
+
+static void xlr_nand_ctrl(struct mtd_info *mtd, int cmd,
+ unsigned int ctrl)
+{
+ if (ctrl & NAND_CLE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_CLE(nand_priv.cs), cmd);
+ else if (ctrl & NAND_ALE)
+ nlm_write_reg(nand_priv.flash_mmio,
+ FLASH_NAND_ALE(nand_priv.cs), cmd);
+}
+
+struct platform_nand_data xlr_nand_data = {
+ .chip = {
+ .nr_chips = 1,
+ .nr_partitions = ARRAY_SIZE(xlr_nand_parts),
+ .chip_delay = 50,
+ .partitions = xlr_nand_parts,
+ },
+ .ctrl = {
+ .cmd_ctrl = xlr_nand_ctrl,
+ },
+};
+
+static struct resource xlr_nand_res[] = {
+ {
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device xlr_nand_dev = {
+ .name = "gen_nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(xlr_nand_res),
+ .resource = xlr_nand_res,
+ .dev = {
+ .platform_data = &xlr_nand_data,
+ }
+};
+
+/*
+ * XLR/XLS supports up to 8 devices on its FLASH interface. The value in
+ * FLASH_BAR (on the MEM/IO bridge) gives the base for mapping all the
+ * flash devices.
+ * Under this, each flash device has an offset and size given by the
+ * CSBASE_ADDR and CSBASE_MASK registers for the device.
+ *
+ * The CSBASE_ registers are expected to be setup by the bootloader.
+ */
+static void setup_flash_resource(uint64_t flash_mmio,
+ uint64_t flash_map_base, int cs, struct resource *res)
+{
+ u32 base, mask;
+
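+	/* CSBASE_ADDR and CSBASE_MASK values are in units of 64 KiB */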
+ base = nlm_read_reg(flash_mmio, FLASH_CSBASE_ADDR(cs));
+ mask = nlm_read_reg(flash_mmio, FLASH_CSADDR_MASK(cs));
+
+ res->start = flash_map_base + ((unsigned long)base << 16);
+ res->end = res->start + (mask + 1) * 64 * 1024;
+}
+
+static int __init xlr_flash_init(void)
+{
+ uint64_t gpio_mmio, flash_mmio, flash_map_base;
+ u32 gpio_resetcfg, flash_bar;
+ int cs, boot_nand, boot_nor;
+
+ /* Flash address bits 39:24 is in bridge flash BAR */
+ flash_bar = nlm_read_reg(nlm_io_base, BRIDGE_FLASH_BAR);
+ flash_map_base = (flash_bar & 0xffff0000) << 8;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ flash_mmio = nlm_mmio_base(NETLOGIC_IO_FLASH_OFFSET);
+
+ /* Get the chip reset config */
+ gpio_resetcfg = nlm_read_reg(gpio_mmio, GPIO_PWRON_RESET_CFG_REG);
+
+ /* Check for boot flash type */
+ boot_nor = boot_nand = 0;
+ if (nlm_chip_is_xls()) {
+ /* On XLS, check boot from NAND bit (GPIO reset reg bit 16) */
+ if (gpio_resetcfg & (1 << 16))
+ boot_nand = 1;
+
+		/* check boot from PCMCIA (GPIO reset reg bit 15) */
+ if ((gpio_resetcfg & (1 << 15)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ } else { /* XLR */
+ /* check boot from PCMCIA (bit 16 in GPIO reset on XLR) */
+ if ((gpio_resetcfg & (1 << 16)) == 0)
+ boot_nor = 1; /* not set, booted from NOR */
+ }
+
+ /* boot flash at chip select 0 */
+ cs = 0;
+
+ if (boot_nand) {
+ nand_priv.cs = cs;
+ nand_priv.flash_mmio = flash_mmio;
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nand_res);
+
+ /* Initialize NAND flash at CS 0 */
+ nlm_write_reg(flash_mmio, FLASH_CSDEV_PARM(cs),
+ FLASH_NAND_CSDEV_PARAM);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMA(cs),
+ FLASH_NAND_CSTIME_PARAMA);
+ nlm_write_reg(flash_mmio, FLASH_CSTIME_PARMB(cs),
+ FLASH_NAND_CSTIME_PARAMB);
+
+ pr_info("ChipSelect %d: NAND Flash %pR\n", cs, xlr_nand_res);
+ return platform_device_register(&xlr_nand_dev);
+ }
+
+ if (boot_nor) {
+ setup_flash_resource(flash_mmio, flash_map_base, cs,
+ xlr_nor_res);
+ pr_info("ChipSelect %d: NOR Flash %pR\n", cs, xlr_nor_res);
+ return platform_device_register(&xlr_nor_dev);
+ }
+ return 0;
+}
+
+arch_initcall(xlr_flash_init);
diff --git a/arch/mips/netlogic/xlr/platform.c b/arch/mips/netlogic/xlr/platform.c
new file mode 100644
index 000000000..4785932af
--- /dev/null
+++ b/arch/mips/netlogic/xlr/platform.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright 2011, Netlogic Microsystems.
+ * Copyright 2004, Matt Porter <mporter@kernel.crashing.org>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/resource.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+#include <linux/i2c.h>
+#include <linux/usb/ehci_pdriver.h>
+#include <linux/usb/ohci_pdriver.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+
+static unsigned int nlm_xlr_uart_in(struct uart_port *p, int offset)
+{
+ uint64_t uartbase;
+ unsigned int value;
+
+ /* sign extend to 64 bits, if needed */
+ uartbase = (uint64_t)(long)p->membase;
+ value = nlm_read_reg(uartbase, offset);
+
+	/* See XLR/XLS errata: MSR and MCR bits are inverted in hardware */
+ if (offset == UART_MSR)
+ value ^= 0xF0;
+ else if (offset == UART_MCR)
+ value ^= 0x3;
+
+ return value;
+}
+
+static void nlm_xlr_uart_out(struct uart_port *p, int offset, int value)
+{
+ uint64_t uartbase;
+
+ /* sign extend to 64 bits, if needed */
+ uartbase = (uint64_t)(long)p->membase;
+
+	/* See XLR/XLS errata: MSR and MCR bits are inverted in hardware */
+ if (offset == UART_MSR)
+ value ^= 0xF0;
+ else if (offset == UART_MCR)
+ value ^= 0x3;
+
+ nlm_write_reg(uartbase, offset, value);
+}
+
+#define PORT(_irq) \
+ { \
+ .irq = _irq, \
+ .regshift = 2, \
+ .iotype = UPIO_MEM32, \
+ .flags = (UPF_SKIP_TEST | \
+ UPF_FIXED_TYPE | UPF_BOOT_AUTOCONF),\
+ .uartclk = PIC_CLK_HZ, \
+ .type = PORT_16550A, \
+ .serial_in = nlm_xlr_uart_in, \
+ .serial_out = nlm_xlr_uart_out, \
+ }
+
+static struct plat_serial8250_port xlr_uart_data[] = {
+ PORT(PIC_UART_0_IRQ),
+ PORT(PIC_UART_1_IRQ),
+ {},
+};
+
+static struct platform_device uart_device = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev = {
+ .platform_data = xlr_uart_data,
+ },
+};
+
+static int __init nlm_uart_init(void)
+{
+ unsigned long uartbase;
+
+ uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
+ xlr_uart_data[0].membase = (void __iomem *)uartbase;
+ xlr_uart_data[0].mapbase = CPHYSADDR(uartbase);
+
+ uartbase = (unsigned long)nlm_mmio_base(NETLOGIC_IO_UART_1_OFFSET);
+ xlr_uart_data[1].membase = (void __iomem *)uartbase;
+ xlr_uart_data[1].mapbase = CPHYSADDR(uartbase);
+
+ return platform_device_register(&uart_device);
+}
+
+arch_initcall(nlm_uart_init);
+
+#ifdef CONFIG_USB
+/* Platform USB devices, only on XLS chips */
+static u64 xls_usb_dmamask = ~(u32)0;
+#define USB_PLATFORM_DEV(n, i, irq) \
+ { \
+ .name = n, \
+ .id = i, \
+ .num_resources = 2, \
+ .dev = { \
+ .dma_mask = &xls_usb_dmamask, \
+ .coherent_dma_mask = 0xffffffff, \
+ }, \
+ .resource = (struct resource[]) { \
+ { \
+ .flags = IORESOURCE_MEM, \
+ }, \
+ { \
+ .start = irq, \
+ .end = irq, \
+ .flags = IORESOURCE_IRQ, \
+ }, \
+ }, \
+ }
+
+static struct usb_ehci_pdata xls_usb_ehci_pdata = {
+ .caps_offset = 0,
+};
+
+static struct usb_ohci_pdata xls_usb_ohci_pdata;
+
+static struct platform_device xls_usb_ehci_device =
+ USB_PLATFORM_DEV("ehci-platform", 0, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_0 =
+ USB_PLATFORM_DEV("ohci-platform", 1, PIC_USB_IRQ);
+static struct platform_device xls_usb_ohci_device_1 =
+ USB_PLATFORM_DEV("ohci-platform", 2, PIC_USB_IRQ);
+
+static struct platform_device *xls_platform_devices[] = {
+ &xls_usb_ehci_device,
+ &xls_usb_ohci_device_0,
+ &xls_usb_ohci_device_1,
+};
+
+int xls_platform_usb_init(void)
+{
+ uint64_t usb_mmio, gpio_mmio;
+ unsigned long memres;
+ uint32_t val;
+
+ if (!nlm_chip_is_xls())
+ return 0;
+
+ gpio_mmio = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_1_OFFSET);
+
+ /* Clear Rogue Phy INTs */
+ nlm_write_reg(usb_mmio, 49, 0x10000000);
+ /* Enable all interrupts */
+ nlm_write_reg(usb_mmio, 50, 0x1f000000);
+
+ /* Enable ports */
+ nlm_write_reg(usb_mmio, 1, 0x07000500);
+
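+	/* bit 22 of GPIO register 21 selects the USB mode: 0 = device, 1 = host */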
+ val = nlm_read_reg(gpio_mmio, 21);
+ if (((val >> 22) & 0x01) == 0) {
+ pr_info("Detected USB Device mode - Not supported!\n");
+ nlm_write_reg(usb_mmio, 0, 0x01000000);
+ return 0;
+ }
+
+ pr_info("Detected USB Host mode - Adding XLS USB devices.\n");
+ /* Clear reset, host mode */
+ nlm_write_reg(usb_mmio, 0, 0x02000000);
+
+	/*
+	 * Memory resources for the XLS USB blocks: EHCI registers at the
+	 * base, then the two OHCI controllers in 0x400-byte windows.
+	 */
+ usb_mmio = nlm_mmio_base(NETLOGIC_IO_USB_0_OFFSET);
+ memres = CPHYSADDR((unsigned long)usb_mmio);
+ xls_usb_ehci_device.resource[0].start = memres;
+ xls_usb_ehci_device.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ehci_device.dev.platform_data = &xls_usb_ehci_pdata;
+
+ memres += 0x400;
+ xls_usb_ohci_device_0.resource[0].start = memres;
+ xls_usb_ohci_device_0.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ohci_device_0.dev.platform_data = &xls_usb_ohci_pdata;
+
+ memres += 0x400;
+ xls_usb_ohci_device_1.resource[0].start = memres;
+ xls_usb_ohci_device_1.resource[0].end = memres + 0x400 - 1;
+ xls_usb_ohci_device_1.dev.platform_data = &xls_usb_ohci_pdata;
+
+ return platform_add_devices(xls_platform_devices,
+ ARRAY_SIZE(xls_platform_devices));
+}
+
+arch_initcall(xls_platform_usb_init);
+#endif
+
+#ifdef CONFIG_I2C
+static struct i2c_board_info nlm_i2c_board_info1[] __initdata = {
+ /* All XLR boards have this RTC and Max6657 Temp Chip */
+ [0] = {
+ .type = "ds1374",
+ .addr = 0x68
+ },
+ [1] = {
+ .type = "lm90",
+ .addr = 0x4c
+ },
+};
+
+static struct resource i2c_resources[] = {
+ [0] = {
+ .start = 0, /* filled at init */
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device nlm_xlr_i2c_1 = {
+ .name = "xlr-i2cbus",
+ .id = 1,
+ .num_resources = 1,
+ .resource = i2c_resources,
+};
+
+static int __init nlm_i2c_init(void)
+{
+ int err = 0;
+ unsigned int offset;
+
+ /* I2C bus 0 does not have any useful devices, configure only bus 1 */
+ offset = NETLOGIC_IO_I2C_1_OFFSET;
+ nlm_xlr_i2c_1.resource[0].start = CPHYSADDR(nlm_mmio_base(offset));
+ nlm_xlr_i2c_1.resource[0].end = nlm_xlr_i2c_1.resource[0].start + 0xfff;
+
+ platform_device_register(&nlm_xlr_i2c_1);
+
+ err = i2c_register_board_info(1, nlm_i2c_board_info1,
+ ARRAY_SIZE(nlm_i2c_board_info1));
+ if (err < 0)
+ pr_err("nlm-i2c: cannot register board I2C devices\n");
+ return err;
+}
+
+arch_initcall(nlm_i2c_init);
+#endif
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
new file mode 100644
index 000000000..72ceddc9a
--- /dev/null
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/kernel.h>
+#include <linux/serial_8250.h>
+#include <linux/pm.h>
+
+#include <asm/idle.h>
+#include <asm/reboot.h>
+#include <asm/time.h>
+#include <asm/bootinfo.h>
+
+#include <asm/netlogic/interrupt.h>
+#include <asm/netlogic/psb-bootinfo.h>
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlr/xlr.h>
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/gpio.h>
+#include <asm/netlogic/xlr/fmn.h>
+
+uint64_t nlm_io_base = DEFAULT_NETLOGIC_IO_BASE;
+struct psb_info nlm_prom_info;
+
+/* default to uniprocessor */
+unsigned int nlm_threads_per_core = 1;
+struct nlm_soc_info nlm_nodes[NLM_NR_NODES];
+cpumask_t nlm_cpumask = CPU_MASK_CPU0;
+
+static void nlm_linux_exit(void)
+{
+ uint64_t gpiobase;
+
+ gpiobase = nlm_mmio_base(NETLOGIC_IO_GPIO_OFFSET);
+ /* trigger a chip reset by writing 1 to GPIO_SWRESET_REG */
+ nlm_write_reg(gpiobase, GPIO_SWRESET_REG, 1);
+ for ( ; ; )
+ cpu_wait();
+}
+
+void __init plat_mem_setup(void)
+{
+ _machine_restart = (void (*)(char *))nlm_linux_exit;
+ _machine_halt = nlm_linux_exit;
+ pm_power_off = nlm_linux_exit;
+}
+
+const char *get_system_type(void)
+{
+ return "Netlogic XLR/XLS Series";
+}
+
+unsigned int nlm_get_cpu_frequency(void)
+{
+ return (unsigned int)nlm_prom_info.cpu_frequency;
+}
+
+void __init prom_free_prom_memory(void)
+{
+ /* Nothing yet */
+}
+
+void nlm_percpu_init(int hwcpuid)
+{
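+	/* FMN init is needed once per core (4 hw threads); do it on thread 0 */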
+ if (hwcpuid % 4 == 0)
+ xlr_percpu_fmn_init();
+}
+
+static void __init build_arcs_cmdline(int *argv)
+{
+ int i, remain, len;
+ char *arg;
+
+ remain = sizeof(arcs_cmdline) - 1;
+ arcs_cmdline[0] = '\0';
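+	/* concatenate the firmware argv[] strings, keeping one byte for the NUL */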
+ for (i = 0; argv[i] != 0; i++) {
+ arg = (char *)(long)argv[i];
+ len = strlen(arg);
+ if (len + 1 > remain)
+ break;
+ strcat(arcs_cmdline, arg);
+ strcat(arcs_cmdline, " ");
+ remain -= len + 1;
+ }
+
+ /* Add the default options here */
+ if ((strstr(arcs_cmdline, "console=")) == NULL) {
+ arg = "console=ttyS0,38400 ";
+ len = strlen(arg);
+ if (len > remain)
+ goto fail;
+ strcat(arcs_cmdline, arg);
+ remain -= len;
+ }
+#ifdef CONFIG_BLK_DEV_INITRD
+ if ((strstr(arcs_cmdline, "rdinit=")) == NULL) {
+ arg = "rdinit=/sbin/init ";
+ len = strlen(arg);
+ if (len > remain)
+ goto fail;
+ strcat(arcs_cmdline, arg);
+ remain -= len;
+ }
+#endif
+ return;
+fail:
+ panic("Cannot add %s, command line too big!", arg);
+}
+
+static void prom_add_memory(void)
+{
+ struct nlm_boot_mem_map *bootm;
+ u64 start, size;
+ u64 pref_backup = 512; /* avoid pref walking beyond end */
+ int i;
+
+ bootm = (void *)(long)nlm_prom_info.psb_mem_map;
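+	/* register every RAM region from the bootloader's memory map */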
+ for (i = 0; i < bootm->nr_map; i++) {
+ if (bootm->map[i].type != BOOT_MEM_RAM)
+ continue;
+ start = bootm->map[i].addr;
+ size = bootm->map[i].size;
+
+		/*
+		 * Workaround: the bootloader reports only 192 MB at address
+		 * 0; extend the region to the full 255 MB.
+		 */
+ if (i == 0 && start == 0 && size == 0x0c000000)
+ size = 0x0ff00000;
+
+ add_memory_region(start, size - pref_backup, BOOT_MEM_RAM);
+ }
+}
+
+static void nlm_init_node(void)
+{
+ struct nlm_soc_info *nodep;
+
+ nodep = nlm_current_node();
+ nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
+ spin_lock_init(&nodep->piclock);
+}
+
+void __init prom_init(void)
+{
+ int *argv, *envp; /* passed as 32 bit ptrs */
+ struct psb_info *prom_infop;
+ void *reset_vec;
+#ifdef CONFIG_SMP
+ int i;
+#endif
+
+	/* the firmware passes 32-bit values; truncate and sign-extend to pointers */
+ argv = (int *)(long)(int)fw_arg1;
+ envp = (int *)(long)(int)fw_arg2;
+ prom_infop = (struct psb_info *)(long)(int)fw_arg3;
+
+ nlm_prom_info = *prom_infop;
+ nlm_init_node();
+
+ /* Update reset entry point with CPU init code */
+ reset_vec = (void *)CKSEG1ADDR(RESET_VEC_PHYS);
+ memset(reset_vec, 0, RESET_VEC_SIZE);
+ memcpy(reset_vec, (void *)nlm_reset_entry,
+ (nlm_reset_entry_end - nlm_reset_entry));
+
+ build_arcs_cmdline(argv);
+ prom_add_memory();
+
+#ifdef CONFIG_SMP
+ for (i = 0; i < 32; i++)
+ if (nlm_prom_info.online_cpu_map & (1 << i))
+ cpumask_set_cpu(i, &nlm_cpumask);
+ nlm_wakeup_secondary_cpus();
+ register_smp_ops(&nlm_smp_ops);
+#endif
+ xlr_board_info_setup();
+ xlr_percpu_fmn_init();
+}
diff --git a/arch/mips/netlogic/xlr/wakeup.c b/arch/mips/netlogic/xlr/wakeup.c
new file mode 100644
index 000000000..d61cba1e9
--- /dev/null
+++ b/arch/mips/netlogic/xlr/wakeup.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights
+ * reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the NetLogic
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/delay.h>
+#include <linux/threads.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/addrspace.h>
+#include <asm/string.h>
+
+#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
+#include <asm/netlogic/mips-extns.h>
+
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+
+int xlr_wakeup_secondary_cpus(void)
+{
+ struct nlm_soc_info *nodep;
+ unsigned int i, j, boot_cpu;
+ volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
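+	/* cpu_ready[] slots are set by the secondary hw threads as they boot */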
+
+	/*
+	 * When booted via the RMI bootloader, send an NMI to each
+	 * secondary core to move it from the bootloader wait loop
+	 * into the Linux reset code.
+	 */
+ nodep = nlm_get_node(0);
+ boot_cpu = hard_smp_processor_id();
+ nlm_set_nmi_handler(nlm_rmiboot_preboot);
+ for (i = 0; i < NR_CPUS; i++) {
+ if (i == boot_cpu || !cpumask_test_cpu(i, &nlm_cpumask))
+ continue;
+ nlm_pic_send_ipi(nodep->picbase, i, 1, 1); /* send NMI */
+ }
+
+ /* Fill up the coremask early */
+ nodep->coremask = 1;
+ for (i = 1; i < nlm_cores_per_node(); i++) {
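+		/* wait up to ~10s (1000000 x 10us) for thread 0 of this core */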
+ for (j = 1000000; j > 0; j--) {
+ if (cpu_ready[i * NLM_THREADS_PER_CORE])
+ break;
+ udelay(10);
+ }
+ if (j != 0)
+ nodep->coremask |= (1u << i);
+ else
+ pr_err("Failed to wakeup core %d\n", i);
+ }
+
+ return 0;
+}