Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/cps-vec.S  |  54
-rw-r--r--   arch/mips/kernel/mips-cm.c  |   5
-rw-r--r--   arch/mips/kernel/mips-mt.c  |  14
-rw-r--r--   arch/mips/kernel/pm-cps.c   | 134
-rw-r--r--   arch/mips/kernel/rtlx-mt.c  |   8
-rw-r--r--   arch/mips/kernel/setup.c    |  17
-rw-r--r--   arch/mips/kernel/smp-cps.c  | 141
-rw-r--r--   arch/mips/kernel/traps.c    |  13
-rw-r--r--   arch/mips/kernel/vdso.c     |   2
-rw-r--r--   arch/mips/kernel/vpe-mt.c   |   4
10 files changed, 218 insertions(+), 174 deletions(-)
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index 64ecfdac65..f876309130 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -4,6 +4,7 @@
  * Author: Paul Burton <paul.burton@mips.com>
  */
 
+#include <linux/init.h>
 #include <asm/addrspace.h>
 #include <asm/asm.h>
 #include <asm/asm-offsets.h>
@@ -82,39 +83,10 @@
 	.endm
 
-.balign 0x1000
-
-LEAF(mips_cps_core_entry)
-	/*
-	 * These first several instructions will be patched by cps_smp_setup to load the
-	 * CCA to use into register s0 and GCR base address to register s1.
-	 */
-	.rept	CPS_ENTRY_PATCH_INSNS
-	nop
-	.endr
-
-	.global mips_cps_core_entry_patch_end
-mips_cps_core_entry_patch_end:
-
-	/* Check whether we're here due to an NMI */
-	mfc0	k0, CP0_STATUS
-	and	k0, k0, ST0_NMI
-	beqz	k0, not_nmi
-	 nop
-
-	/* This is an NMI */
-	PTR_LA	k0, nmi_handler
-	jr	k0
-	 nop
-
-not_nmi:
-	/* Setup Cause */
-	li	t0, CAUSEF_IV
-	mtc0	t0, CP0_CAUSE
-
-	/* Setup Status */
-	li	t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
-	mtc0	t0, CP0_STATUS
+LEAF(mips_cps_core_boot)
+	/* Save CCA and GCR base */
+	move	s0, a0
+	move	s1, a1
 
 	/* We don't know how to do coherence setup on earlier ISA */
 #if MIPS_ISA_REV > 0
@@ -178,49 +150,45 @@ not_nmi:
 	PTR_L	sp, VPEBOOTCFG_SP(v1)
 	jr	t1
 	 nop
-	END(mips_cps_core_entry)
+	END(mips_cps_core_boot)
 
-.org 0x200
+	__INIT
 
 LEAF(excep_tlbfill)
 	DUMP_EXCEP("TLB Fill")
 	b	.
 	 nop
 	END(excep_tlbfill)
 
-.org 0x280
 LEAF(excep_xtlbfill)
 	DUMP_EXCEP("XTLB Fill")
 	b	.
 	 nop
 	END(excep_xtlbfill)
 
-.org 0x300
 LEAF(excep_cache)
 	DUMP_EXCEP("Cache")
 	b	.
 	 nop
 	END(excep_cache)
 
-.org 0x380
 LEAF(excep_genex)
 	DUMP_EXCEP("General")
 	b	.
 	 nop
 	END(excep_genex)
 
-.org 0x400
 LEAF(excep_intex)
 	DUMP_EXCEP("Interrupt")
 	b	.
 	 nop
 	END(excep_intex)
 
-.org 0x480
 LEAF(excep_ejtag)
 	PTR_LA	k0, ejtag_debug_handler
 	jr	k0
 	 nop
 	END(excep_ejtag)
+	__FINIT
 
 LEAF(mips_cps_core_init)
 #ifdef CONFIG_MIPS_MT_SMP
@@ -428,7 +396,7 @@ LEAF(mips_cps_boot_vpes)
 	/* Calculate a pointer to the VPEs struct vpe_boot_config */
 	li	t0, VPEBOOTCFG_SIZE
 	mul	t0, t0, ta1
-	addu	t0, t0, ta3
+	PTR_ADDU t0, t0, ta3
 
 	/* Set the TC restart PC */
 	lw	t1, VPEBOOTCFG_PC(t0)
@@ -603,10 +571,10 @@ dcache_done:
 	lw	$1, TI_CPU(gp)
 	sll	$1, $1, LONGLOG
 	PTR_LA	\dest, __per_cpu_offset
-	addu	$1, $1, \dest
+	PTR_ADDU $1, $1, \dest
 	lw	$1, 0($1)
 	PTR_LA	\dest, cps_cpu_state
-	addu	\dest, \dest, $1
+	PTR_ADDU \dest, \dest, $1
 	.set	pop
 	.endm
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 268ac0b811..3a115fab55 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -179,7 +179,7 @@ static char *cm3_causes[32] = {
 static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
 static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
 
-phys_addr_t __mips_cm_phys_base(void)
+phys_addr_t __weak mips_cm_phys_base(void)
 {
 	unsigned long cmgcr;
 
@@ -198,9 +198,6 @@ phys_addr_t __mips_cm_phys_base(void)
 	return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
 }
 
-phys_addr_t mips_cm_phys_base(void)
-	__attribute__((weak, alias("__mips_cm_phys_base")));
-
 phys_addr_t __weak mips_cm_l2sync_phys_base(void)
 {
 	u32 base_reg;
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index c07d64438b..c938ba208f 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -229,19 +229,13 @@ void mips_mt_set_cpuoptions(void)
 	}
 }
 
-struct class *mt_class;
+const struct class mt_class = {
+	.name = "mt",
+};
 
 static int __init mips_mt_init(void)
 {
-	struct class *mtc;
-
-	mtc = class_create("mt");
-	if (IS_ERR(mtc))
-		return PTR_ERR(mtc);
-
-	mt_class = mtc;
-
-	return 0;
+	return class_register(&mt_class);
 }
 
 subsys_initcall(mips_mt_init);
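For reference, a minimal sketch of the pattern the mips-mt.c hunk (and the rtlx-mt.c hunks further down) moves to: a statically defined const struct class registered with class_register() instead of a pointer obtained from class_create(). The "demo" names and the dev_t are illustrative placeholders, not part of the patch.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static const struct class demo_class = {
	.name = "demo",
};

static dev_t demo_devt;	/* assumed to be allocated elsewhere (e.g. alloc_chrdev_region) */

static int __init demo_init(void)
{
	struct device *dev;
	int err;

	err = class_register(&demo_class);
	if (err)
		return err;

	/* device_create() and device_destroy() now take the class by const pointer */
	dev = device_create(&demo_class, NULL, demo_devt, NULL, "demo0");
	if (IS_ERR(dev)) {
		class_unregister(&demo_class);
		return PTR_ERR(dev);
	}
	return 0;
}

static void __exit demo_exit(void)
{
	device_destroy(&demo_class, demo_devt);
	class_unregister(&demo_class);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");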
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 9bf60d7d44..d09ca77e62 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -18,6 +18,7 @@
 #include <asm/mipsmtregs.h>
 #include <asm/pm.h>
 #include <asm/pm-cps.h>
+#include <asm/regdef.h>
 #include <asm/smp-cps.h>
 #include <asm/uasm.h>
 
@@ -69,13 +70,6 @@ DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
 static struct uasm_label labels[32];
 static struct uasm_reloc relocs[32];
 
-enum mips_reg {
-	zero, at, v0, v1, a0, a1, a2, a3,
-	t0, t1, t2, t3, t4, t5, t6, t7,
-	s0, s1, s2, s3, s4, s5, s6, s7,
-	t8, t9, k0, k1, gp, sp, fp, ra,
-};
-
 bool cps_pm_support_state(enum cps_pm_state state)
 {
 	return test_bit(state, state_support);
 }
@@ -203,13 +197,13 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
 		return;
 
 	/* Load base address */
-	UASM_i_LA(pp, t0, (long)CKSEG0);
+	UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
 
 	/* Calculate end address */
 	if (cache_size < 0x8000)
-		uasm_i_addiu(pp, t1, t0, cache_size);
+		uasm_i_addiu(pp, GPR_T1, GPR_T0, cache_size);
 	else
-		UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
+		UASM_i_LA(pp, GPR_T1, (long)(CKSEG0 + cache_size));
 
 	/* Start of cache op loop */
 	uasm_build_label(pl, *pp, lbl);
@@ -217,19 +211,19 @@ static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
 	/* Generate the cache ops */
 	for (i = 0; i < unroll_lines; i++) {
 		if (cpu_has_mips_r6) {
-			uasm_i_cache(pp, op, 0, t0);
-			uasm_i_addiu(pp, t0, t0, cache->linesz);
+			uasm_i_cache(pp, op, 0, GPR_T0);
+			uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz);
 		} else {
-			uasm_i_cache(pp, op, i * cache->linesz, t0);
+			uasm_i_cache(pp, op, i * cache->linesz, GPR_T0);
 		}
 	}
 
 	if (!cpu_has_mips_r6)
 		/* Update the base address */
-		uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+		uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz);
 
 	/* Loop if we haven't reached the end address yet */
-	uasm_il_bne(pp, pr, t0, t1, lbl);
+	uasm_il_bne(pp, pr, GPR_T0, GPR_T1, lbl);
 	uasm_i_nop(pp);
 }
@@ -275,25 +269,25 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
 	 */
 
 	/* Preserve perf counter setup */
-	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
-	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
+	uasm_i_mfc0(pp, GPR_T2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
+	uasm_i_mfc0(pp, GPR_T3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
 
 	/* Setup perf counter to count FSB full pipeline stalls */
-	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
-	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
+	uasm_i_addiu(pp, GPR_T0, GPR_ZERO, (perf_event << 5) | 0xf);
+	uasm_i_mtc0(pp, GPR_T0, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
 	uasm_i_ehb(pp);
-	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1);	/* PerfCntN */
+	uasm_i_mtc0(pp, GPR_ZERO, 25, (perf_counter * 2) + 1);	/* PerfCntN */
 	uasm_i_ehb(pp);
 
 	/* Base address for loads */
-	UASM_i_LA(pp, t0, (long)CKSEG0);
+	UASM_i_LA(pp, GPR_T0, (long)CKSEG0);
 
 	/* Start of clear loop */
 	uasm_build_label(pl, *pp, lbl);
 
 	/* Perform some loads to fill the FSB */
 	for (i = 0; i < num_loads; i++)
-		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
+		uasm_i_lw(pp, GPR_ZERO, i * line_size * line_stride, GPR_T0);
 
 	/*
 	 * Invalidate the new D-cache entries so that the cache will need
@@ -301,9 +295,9 @@ static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
 	 */
 	for (i = 0; i < num_loads; i++) {
 		uasm_i_cache(pp, Hit_Invalidate_D,
-			     i * line_size * line_stride, t0);
+			     i * line_size * line_stride, GPR_T0);
 		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
-			     i * line_size * line_stride, t0);
+			     i * line_size * line_stride, GPR_T0);
 	}
 
 	/* Barrier ensuring previous cache invalidates are complete */
@@ -311,16 +305,16 @@
 	uasm_i_ehb(pp);
 
 	/* Check whether the pipeline stalled due to the FSB being full */
-	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1);	/* PerfCntN */
+	uasm_i_mfc0(pp, GPR_T1, 25, (perf_counter * 2) + 1);	/* PerfCntN */
 
 	/* Loop if it didn't */
-	uasm_il_beqz(pp, pr, t1, lbl);
+	uasm_il_beqz(pp, pr, GPR_T1, lbl);
 	uasm_i_nop(pp);
 
 	/* Restore perf counter 1. The count may well now be wrong... */
-	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
+	uasm_i_mtc0(pp, GPR_T2, 25, (perf_counter * 2) + 0);	/* PerfCtlN */
 	uasm_i_ehb(pp);
-	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
+	uasm_i_mtc0(pp, GPR_T3, 25, (perf_counter * 2) + 1);	/* PerfCntN */
 	uasm_i_ehb(pp);
 
 	return 0;
@@ -330,12 +324,12 @@
 static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
 				struct uasm_reloc **pr,
 				unsigned r_addr, int lbl)
 {
-	uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
+	uasm_i_lui(pp, GPR_T0, uasm_rel_hi(0x80000000));
 	uasm_build_label(pl, *pp, lbl);
-	uasm_i_ll(pp, t1, 0, r_addr);
-	uasm_i_or(pp, t1, t1, t0);
-	uasm_i_sc(pp, t1, 0, r_addr);
-	uasm_il_beqz(pp, pr, t1, lbl);
+	uasm_i_ll(pp, GPR_T1, 0, r_addr);
+	uasm_i_or(pp, GPR_T1, GPR_T1, GPR_T0);
+	uasm_i_sc(pp, GPR_T1, 0, r_addr);
+	uasm_il_beqz(pp, pr, GPR_T1, lbl);
 	uasm_i_nop(pp);
 }
@@ -344,9 +338,9 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
 	u32 *buf, *p;
-	const unsigned r_online = a0;
-	const unsigned r_nc_count = a1;
-	const unsigned r_pcohctl = t7;
+	const unsigned r_online = GPR_A0;
+	const unsigned r_nc_count = GPR_A1;
+	const unsigned r_pcohctl = GPR_T8;
 	const unsigned max_instrs = 256;
 	unsigned cpc_cmd;
 	int err;
@@ -383,8 +377,8 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 		 * with the return address placed in v0 to avoid clobbering
 		 * the ra register before it is saved.
 		 */
-		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
-		uasm_i_jalr(&p, v0, t0);
+		UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save);
+		uasm_i_jalr(&p, GPR_V0, GPR_T0);
 		uasm_i_nop(&p);
 	}
 
@@ -399,11 +393,11 @@ static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
 	/* Increment ready_count */
 	uasm_i_sync(&p, __SYNC_mb);
 	uasm_build_label(&l, p, lbl_incready);
-	uasm_i_ll(&p, t1, 0, r_nc_count);
-	uasm_i_addiu(&p, t2, t1, 1);
-	uasm_i_sc(&p, t2, 0, r_nc_count);
-	uasm_il_beqz(&p, &r, t2, lbl_incready);
-	uasm_i_addiu(&p, t1, t1, 1);
+	uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
+	uasm_i_addiu(&p, GPR_T2, GPR_T1, 1);
+	uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
+	uasm_il_beqz(&p, &r, GPR_T2, lbl_incready);
+	uasm_i_addiu(&p, GPR_T1, GPR_T1, 1);
 
 	/* Barrier ensuring all CPUs see the updated r_nc_count value */
 	uasm_i_sync(&p, __SYNC_mb);
@@ -412,7 +406,7 @@
 	 * If this is the last VPE to become ready for non-coherence
 	 * then it should branch below.
 	 */
-	uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
+	uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence);
 	uasm_i_nop(&p);
 
 	if (state < CPS_PM_POWER_GATED) {
@@ -422,13 +416,13 @@
 		 * has been disabled before proceeding, which it will do
 		 * by polling for the top bit of ready_count being set.
 		 */
-		uasm_i_addiu(&p, t1, zero, -1);
+		uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1);
 		uasm_build_label(&l, p, lbl_poll_cont);
-		uasm_i_lw(&p, t0, 0, r_nc_count);
-		uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
+		uasm_i_lw(&p, GPR_T0, 0, r_nc_count);
+		uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont);
 		uasm_i_ehb(&p);
 		if (cpu_has_mipsmt)
-			uasm_i_yield(&p, zero, t1);
+			uasm_i_yield(&p, GPR_ZERO, GPR_T1);
 		uasm_il_b(&p, &r, lbl_poll_cont);
 		uasm_i_nop(&p);
 	} else {
@@ -438,16 +432,16 @@
 		 */
 		if (cpu_has_mipsmt) {
 			/* Halt the VPE via C0 tchalt register */
-			uasm_i_addiu(&p, t0, zero, TCHALT_H);
-			uasm_i_mtc0(&p, t0, 2, 4);
+			uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H);
+			uasm_i_mtc0(&p, GPR_T0, 2, 4);
 		} else if (cpu_has_vp) {
 			/* Halt the VP via the CPC VP_STOP register */
 			unsigned int vpe_id;
 
 			vpe_id = cpu_vpe_id(&cpu_data[cpu]);
-			uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
-			UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
-			uasm_i_sw(&p, t0, 0, t1);
+			uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id);
+			UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop());
+			uasm_i_sw(&p, GPR_T0, 0, GPR_T1);
 		} else {
 			BUG();
 		}
@@ -482,9 +476,9 @@
 	 * defined by the interAptiv & proAptiv SUMs as ensuring that the
 	 * operation resulting from the preceding store is complete.
 	 */
-	uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
-	uasm_i_sw(&p, t0, 0, r_pcohctl);
-	uasm_i_lw(&p, t0, 0, r_pcohctl);
+	uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu]));
+	uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
+	uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
 
 	/* Barrier to ensure write to coherence control is complete */
 	uasm_i_sync(&p, __SYNC_full);
@@ -492,8 +486,8 @@
 	}
 
 	/* Disable coherence */
-	uasm_i_sw(&p, zero, 0, r_pcohctl);
-	uasm_i_lw(&p, t0, 0, r_pcohctl);
+	uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl);
+	uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
 
 	if (state >= CPS_PM_CLOCK_GATED) {
 		err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
@@ -515,9 +509,9 @@
 		}
 
 		/* Issue the CPC command */
-		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
-		uasm_i_addiu(&p, t1, zero, cpc_cmd);
-		uasm_i_sw(&p, t1, 0, t0);
+		UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd());
+		uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd);
+		uasm_i_sw(&p, GPR_T1, 0, GPR_T0);
 
 		if (state == CPS_PM_POWER_GATED) {
 			/* If anything goes wrong just hang */
@@ -564,12 +558,12 @@
 	 * will run this. The first will actually re-enable coherence & the
 	 * rest will just be performing a rather unusual nop.
 	 */
-	uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+	uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3
 				? CM_GCR_Cx_COHERENCE_COHDOMAINEN
 				: CM3_GCR_Cx_COHERENCE_COHEN);
-	uasm_i_sw(&p, t0, 0, r_pcohctl);
-	uasm_i_lw(&p, t0, 0, r_pcohctl);
+	uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
+	uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
 
 	/* Barrier to ensure write to coherence control is complete */
 	uasm_i_sync(&p, __SYNC_full);
@@ -579,11 +573,11 @@
 	/* Decrement ready_count */
 	uasm_build_label(&l, p, lbl_decready);
 	uasm_i_sync(&p, __SYNC_mb);
-	uasm_i_ll(&p, t1, 0, r_nc_count);
-	uasm_i_addiu(&p, t2, t1, -1);
-	uasm_i_sc(&p, t2, 0, r_nc_count);
-	uasm_il_beqz(&p, &r, t2, lbl_decready);
-	uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
+	uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
+	uasm_i_addiu(&p, GPR_T2, GPR_T1, -1);
+	uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
+	uasm_il_beqz(&p, &r, GPR_T2, lbl_decready);
+	uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1);
 
 	/* Barrier ensuring all CPUs see the updated r_nc_count value */
 	uasm_i_sync(&p, __SYNC_mb);
@@ -612,7 +606,7 @@
 	}
 
 	/* The core is coherent, time to return to C code */
-	uasm_i_jr(&p, ra);
+	uasm_i_jr(&p, GPR_RA);
 	uasm_i_nop(&p);
 
 gen_done:
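The pm-cps.c hunks above only rename the register constants: the private enum mips_reg is dropped in favour of the GPR_* names now provided by <asm/regdef.h>, and the emitted sequences are unchanged. As a reference point, a uasm-built ll/sc retry loop in the shape of the lbl_incready block might look like the following sketch; the function name and label id are illustrative only, and it assumes the post-patch GPR_* macros.

#include <asm/regdef.h>
#include <asm/sync.h>
#include <asm/uasm.h>

#define LBL_RETRY 1	/* illustrative label id; real code uses an enum */

static struct uasm_label labels[8];
static struct uasm_reloc relocs[8];

/*
 * Emit code that atomically increments the word at offset 0 from the
 * register r_count, retrying until the sc succeeds - the same shape as
 * the lbl_incready loop generated by cps_gen_entry_code().
 */
static u32 *emit_atomic_inc(u32 *p, unsigned int r_count)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	uasm_i_sync(&p, __SYNC_mb);
	uasm_build_label(&l, p, LBL_RETRY);
	uasm_i_ll(&p, GPR_T1, 0, r_count);		/* t1 = *r_count	*/
	uasm_i_addiu(&p, GPR_T2, GPR_T1, 1);		/* t2 = t1 + 1		*/
	uasm_i_sc(&p, GPR_T2, 0, r_count);		/* try to store t2	*/
	uasm_il_beqz(&p, &r, GPR_T2, LBL_RETRY);	/* retry if sc failed	*/
	uasm_i_nop(&p);					/* branch delay slot	*/
	uasm_i_sync(&p, __SYNC_mb);

	uasm_resolve_relocs(relocs, labels);
	return p;
}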
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 38c6925a1b..ff7535de42 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -95,11 +95,11 @@ int __init rtlx_module_init(void)
 		atomic_set(&channel_wqs[i].in_open, 0);
 		mutex_init(&channel_wqs[i].mutex);
 
-		dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+		dev = device_create(&mt_class, NULL, MKDEV(major, i), NULL,
 				    "%s%d", RTLX_MODULE_NAME, i);
 		if (IS_ERR(dev)) {
 			while (i--)
-				device_destroy(mt_class, MKDEV(major, i));
+				device_destroy(&mt_class, MKDEV(major, i));
 
 			err = PTR_ERR(dev);
 			goto out_chrdev;
@@ -127,7 +127,7 @@ int __init rtlx_module_init(void)
 
 out_class:
 	for (i = 0; i < RTLX_CHANNELS; i++)
-		device_destroy(mt_class, MKDEV(major, i));
+		device_destroy(&mt_class, MKDEV(major, i));
 
 out_chrdev:
 	unregister_chrdev(major, RTLX_MODULE_NAME);
@@ -139,7 +139,7 @@ void __exit rtlx_module_exit(void)
 	int i;
 
 	for (i = 0; i < RTLX_CHANNELS; i++)
-		device_destroy(mt_class, MKDEV(major, i));
+		device_destroy(&mt_class, MKDEV(major, i));
 
 	unregister_chrdev(major, RTLX_MODULE_NAME);
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 9c30de1515..12a1a4ffb6 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -442,8 +442,6 @@ static void __init mips_reserve_vmcore(void)
 #endif
 }
 
-#ifdef CONFIG_KEXEC
-
 /* 64M alignment for crash kernel regions */
 #define CRASH_ALIGN	SZ_64M
 #define CRASH_ADDR_MAX	SZ_512M
@@ -454,6 +452,9 @@ static void __init mips_parse_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
+		return;
+
 	total_mem = memblock_phys_mem_size();
 	ret = parse_crashkernel(boot_command_line, total_mem,
 				&crash_size, &crash_base,
@@ -489,6 +490,9 @@
 static void __init request_crashkernel(struct resource *res)
 {
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
+		return;
+
 	if (crashk_res.start == crashk_res.end)
 		return;
 
@@ -498,15 +502,6 @@ static void __init request_crashkernel(struct resource *res)
 			(unsigned long)(resource_size(&crashk_res) >> 20),
 			(unsigned long)(crashk_res.start >> 20));
 }
-#else /* !defined(CONFIG_KEXEC) */
-static void __init mips_parse_crashkernel(void)
-{
-}
-
-static void __init request_crashkernel(struct resource *res)
-{
-}
-#endif /* !defined(CONFIG_KEXEC) */
 
 static void __init check_kernel_sections_mem(void)
 {
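The setup.c hunks above drop the #ifdef CONFIG_KEXEC stub definitions and instead bail out of the real functions when the option is off. A minimal sketch of the idiom, using the same CONFIG_CRASH_RESERVE symbol as the patch (function name is illustrative):

#include <linux/init.h>
#include <linux/kconfig.h>

static void __init crashkernel_setup_example(void)
{
	/*
	 * IS_ENABLED() expands to a constant 0 or 1, so when
	 * CONFIG_CRASH_RESERVE is not set the remainder of the function is
	 * compiled out as dead code - but it is still parsed and
	 * type-checked, unlike code hidden behind #ifdef.
	 */
	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
		return;

	/* ... crashkernel parsing and reservation as in the patch ... */
}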
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index f6c37d407f..9cc087dd1c 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -7,6 +7,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 #include <linux/sched/task_stack.h>
 #include <linux/sched/hotplug.h>
 #include <linux/slab.h>
@@ -20,12 +21,24 @@
 #include <asm/mipsregs.h>
 #include <asm/pm-cps.h>
 #include <asm/r4kcache.h>
+#include <asm/regdef.h>
 #include <asm/smp.h>
 #include <asm/smp-cps.h>
 #include <asm/time.h>
 #include <asm/uasm.h>
 
+#define BEV_VEC_SIZE	0x500
+#define BEV_VEC_ALIGN	0x1000
+
+enum label_id {
+	label_not_nmi = 1,
+};
+
+UASM_L_LA(_not_nmi)
+
 static DECLARE_BITMAP(core_power, NR_CPUS);
+static uint32_t core_entry_reg;
+static phys_addr_t cps_vec_pa;
 
 struct core_boot_config *mips_cps_core_bootcfg;
 
@@ -34,10 +47,100 @@ static unsigned __init core_vpe_count(unsigned int cluster, unsigned core)
 	return min(smp_max_threads, mips_cps_numvps(cluster, core));
 }
 
+static void __init *mips_cps_build_core_entry(void *addr)
+{
+	extern void (*nmi_handler)(void);
+	u32 *p = addr;
+	u32 val;
+	struct uasm_label labels[2];
+	struct uasm_reloc relocs[2];
+	struct uasm_label *l = labels;
+	struct uasm_reloc *r = relocs;
+
+	memset(labels, 0, sizeof(labels));
+	memset(relocs, 0, sizeof(relocs));
+
+	uasm_i_mfc0(&p, GPR_K0, C0_STATUS);
+	UASM_i_LA(&p, GPR_T9, ST0_NMI);
+	uasm_i_and(&p, GPR_K0, GPR_K0, GPR_T9);
+
+	uasm_il_bnez(&p, &r, GPR_K0, label_not_nmi);
+	uasm_i_nop(&p);
+	UASM_i_LA(&p, GPR_K0, (long)&nmi_handler);
+
+	uasm_l_not_nmi(&l, p);
+
+	val = CAUSEF_IV;
+	uasm_i_lui(&p, GPR_K0, val >> 16);
+	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+	uasm_i_mtc0(&p, GPR_K0, C0_CAUSE);
+	val = ST0_CU1 | ST0_CU0 | ST0_BEV | ST0_KX_IF_64;
+	uasm_i_lui(&p, GPR_K0, val >> 16);
+	uasm_i_ori(&p, GPR_K0, GPR_K0, val & 0xffff);
+	uasm_i_mtc0(&p, GPR_K0, C0_STATUS);
+	uasm_i_ehb(&p);
+	uasm_i_ori(&p, GPR_A0, 0, read_c0_config() & CONF_CM_CMASK);
+	UASM_i_LA(&p, GPR_A1, (long)mips_gcr_base);
+#if defined(KBUILD_64BIT_SYM32) || defined(CONFIG_32BIT)
+	UASM_i_LA(&p, GPR_T9, CKSEG1ADDR(__pa_symbol(mips_cps_core_boot)));
+#else
+	UASM_i_LA(&p, GPR_T9, TO_UNCAC(__pa_symbol(mips_cps_core_boot)));
+#endif
+	uasm_i_jr(&p, GPR_T9);
+	uasm_i_nop(&p);
+
+	uasm_resolve_relocs(relocs, labels);
+
+	return p;
+}
+
+static int __init allocate_cps_vecs(void)
+{
+	/* Try to allocate in KSEG1 first */
+	cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
+						0x0, CSEGX_SIZE - 1);
+
+	if (cps_vec_pa)
+		core_entry_reg = CKSEG1ADDR(cps_vec_pa) &
+					CM_GCR_Cx_RESET_BASE_BEVEXCBASE;
+
+	if (!cps_vec_pa && mips_cm_is64) {
+		cps_vec_pa = memblock_phys_alloc_range(BEV_VEC_SIZE, BEV_VEC_ALIGN,
+							0x0, SZ_4G - 1);
+		if (cps_vec_pa)
+			core_entry_reg = (cps_vec_pa & CM_GCR_Cx_RESET_BASE_BEVEXCBASE) |
+					CM_GCR_Cx_RESET_BASE_MODE;
+	}
+
+	if (!cps_vec_pa)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __init setup_cps_vecs(void)
+{
+	void *cps_vec;
+
+	cps_vec = (void *)CKSEG1ADDR_OR_64BIT(cps_vec_pa);
+	mips_cps_build_core_entry(cps_vec);
+
+	memcpy(cps_vec + 0x200, &excep_tlbfill, 0x80);
+	memcpy(cps_vec + 0x280, &excep_xtlbfill, 0x80);
+	memcpy(cps_vec + 0x300, &excep_cache, 0x80);
+	memcpy(cps_vec + 0x380, &excep_genex, 0x80);
+	memcpy(cps_vec + 0x400, &excep_intex, 0x80);
+	memcpy(cps_vec + 0x480, &excep_ejtag, 0x80);
+
+	/* Make sure no prefetched data in cache */
+	blast_inv_dcache_range(CKSEG0ADDR_OR_64BIT(cps_vec_pa), CKSEG0ADDR_OR_64BIT(cps_vec_pa) + BEV_VEC_SIZE);
+	bc_inv(CKSEG0ADDR_OR_64BIT(cps_vec_pa), BEV_VEC_SIZE);
+	__sync();
+}
+
 static void __init cps_smp_setup(void)
 {
 	unsigned int nclusters, ncores, nvpes, core_vpes;
-	unsigned long core_entry;
 	int cl, c, v;
 
 	/* Detect & record VPE topology */
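The hunk above replaces the linker-placed, runtime-patched mips_cps_core_entry with a boot vector that is allocated from memblock and filled with uasm-generated code at boot. A hedged sketch of the allocation step in isolation (names, sizes, and the SZ_512M bound stand in for the patch's BEV_VEC_* constants and CSEGX_SIZE; the real code also falls back to a below-4GB allocation for 64-bit CM):

#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <asm/addrspace.h>

/*
 * Reserve a small, 4K-aligned boot vector below 512MB so it is reachable
 * through KSEG1 (unmapped, uncached) before paging is up.
 * memblock_phys_alloc_range() returns a physical address, or 0 on failure.
 */
static phys_addr_t __init alloc_boot_vector(void)
{
	phys_addr_t pa;

	pa = memblock_phys_alloc_range(0x500, SZ_4K, 0, SZ_512M - 1);
	if (!pa)
		return 0;	/* caller must fall back or fail */

	pr_info("boot vector at %pa (uncached va %p)\n",
		&pa, (void *)CKSEG1ADDR(pa));
	return pa;
}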
@@ -94,10 +197,11 @@ static void __init cps_smp_setup(void)
 	/* Make core 0 coherent with everything */
 	write_gcr_cl_coherence(0xff);
 
-	if (mips_cm_revision() >= CM_REV_CM3) {
-		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
-		write_gcr_bev_base(core_entry);
-	}
+	if (allocate_cps_vecs())
+		pr_err("Failed to allocate CPS vectors\n");
+
+	if (core_entry_reg && mips_cm_revision() >= CM_REV_CM3)
+		write_gcr_bev_base(core_entry_reg);
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	/* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -110,10 +214,14 @@
 static void __init cps_prepare_cpus(unsigned int max_cpus)
 {
 	unsigned ncores, core_vpes, c, cca;
 	bool cca_unsuitable, cores_limited;
-	u32 *entry_code;
 
 	mips_mt_set_cpuoptions();
 
+	if (!core_entry_reg) {
+		pr_err("core_entry address unsuitable, disabling smp-cps\n");
+		goto err_out;
+	}
+
 	/* Detect whether the CCA is unsuited to multi-core SMP */
 	cca = read_c0_config() & CONF_CM_CMASK;
 	switch (cca) {
@@ -145,20 +253,7 @@ static void __init cps_prepare_cpus(unsigned int max_cpus)
 			(cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
 			cpu_has_dc_aliases ? "dcache aliasing" : "");
 
-	/*
-	 * Patch the start of mips_cps_core_entry to provide:
-	 *
-	 * s0 = kseg0 CCA
-	 */
-	entry_code = (u32 *)&mips_cps_core_entry;
-	uasm_i_addiu(&entry_code, 16, 0, cca);
-	UASM_i_LA(&entry_code, 17, (long)mips_gcr_base);
-	BUG_ON((void *)entry_code > (void *)&mips_cps_core_entry_patch_end);
-	blast_dcache_range((unsigned long)&mips_cps_core_entry,
-			   (unsigned long)entry_code);
-	bc_wback_inv((unsigned long)&mips_cps_core_entry,
-		     (void *)entry_code - (void *)&mips_cps_core_entry);
-	__sync();
+	setup_cps_vecs();
 
 	/* Allocate core boot configuration structs */
 	ncores = mips_cps_numcores(0);
@@ -213,7 +308,7 @@ static void boot_core(unsigned int core, unsigned int vpe_id)
 	mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
 
 	/* Set its reset vector */
-	write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+	write_gcr_co_reset_base(core_entry_reg);
 
 	/* Ensure its coherency is disabled */
 	write_gcr_co_coherence(0);
@@ -290,7 +385,6 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
 	struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
 	struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
-	unsigned long core_entry;
 	unsigned int remote;
 	int err;
 
@@ -314,8 +408,7 @@ static int cps_boot_secondary(int cpu, struct task_struct *idle)
 	if (cpu_has_vp) {
 		mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
-		core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
-		write_gcr_co_reset_base(core_entry);
+		write_gcr_co_reset_base(core_entry_reg);
 		mips_cm_unlock_other();
 	}
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index a1c1cb5de9..dc29bd9656 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -58,6 +58,7 @@
 #include <asm/module.h>
 #include <asm/msa.h>
 #include <asm/ptrace.h>
+#include <asm/regdef.h>
 #include <asm/sections.h>
 #include <asm/siginfo.h>
 #include <asm/tlbdebug.h>
@@ -2041,13 +2042,12 @@ void __init *set_except_vector(int n, void *addr)
 	unsigned long jump_mask = ~((1 << 28) - 1);
 #endif
 	u32 *buf = (u32 *)(ebase + 0x200);
-	unsigned int k0 = 26;
 
 	if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
 		uasm_i_j(&buf, handler & ~jump_mask);
 		uasm_i_nop(&buf);
 	} else {
-		UASM_i_LA(&buf, k0, handler);
-		uasm_i_jr(&buf, k0);
+		UASM_i_LA(&buf, GPR_K0, handler);
+		uasm_i_jr(&buf, GPR_K0);
 		uasm_i_nop(&buf);
 	}
 	local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
@@ -2299,7 +2299,7 @@ static const char panic_null_cerr[] =
 void set_uncached_handler(unsigned long offset, void *addr,
 	unsigned long size)
 {
-	unsigned long uncached_ebase = CKSEG1ADDR(ebase);
+	unsigned long uncached_ebase = CKSEG1ADDR_OR_64BIT(__pa(ebase));
 
 	if (!addr)
 		panic(panic_null_cerr);
@@ -2351,10 +2351,13 @@ void __init trap_init(void)
 		 * EVA is special though as it allows segments to be rearranged
 		 * and to become uncached during cache error handling.
 		 */
-		if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+		if (!IS_ENABLED(CONFIG_EVA) && ebase_pa < 0x20000000)
 			ebase = CKSEG0ADDR(ebase_pa);
 		else
 			ebase = (unsigned long)phys_to_virt(ebase_pa);
+		if (ebase_pa >= 0x20000000)
+			pr_warn("ebase(%pa) should better be in KSeg0",
+				&ebase_pa);
 	}
 
 	if (cpu_has_mmips) {
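For reference on the set_except_vector() hunk above: a MIPS j instruction keeps the upper 4 bits of the program counter and can therefore only reach targets inside the same 256MB region, which is what the `(handler & jump_mask) == ((ebase + 0x200) & jump_mask)` test checks; when the handler is out of range, the stub instead loads the full address into k0 and uses jr. A small arithmetic sketch of that check (illustrative helper, not from the patch):

#include <linux/types.h>

/*
 * True if `target` is reachable from the vector at `vector` with a single
 * MIPS j instruction, i.e. both lie in the same 256MB-aligned region.
 */
static bool j_insn_in_range(unsigned long vector, unsigned long target)
{
	unsigned long jump_mask = ~((1UL << 28) - 1);

	return (target & jump_mask) == (vector & jump_mask);
}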
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index f6d40e43f1..dda36fa263 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -24,7 +24,7 @@
 #include <vdso/vsyscall.h>
 
 /* Kernel-provided data used by the VDSO. */
-static union mips_vdso_data mips_vdso_data __page_aligned_data;
+static union vdso_data_store mips_vdso_data __page_aligned_data;
 struct vdso_data *vdso_data = mips_vdso_data.data;
 
 /*
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
index 667bc75f64..84124ac2d2 100644
--- a/arch/mips/kernel/vpe-mt.c
+++ b/arch/mips/kernel/vpe-mt.c
@@ -95,8 +95,8 @@ int vpe_run(struct vpe *v)
 	 * We don't pass the memsize here, so VPE programs need to be
 	 * compiled with DFLT_STACK_SIZE and DFLT_HEAP_SIZE defined.
 	 */
-	mttgpr(7, 0);
-	mttgpr(6, v->ntcs);
+	mttgpr($7, 0);
+	mttgpr($6, v->ntcs);
 
 	/* set up VPE1 */
 	/*